diff --git a/.dockerignore b/.dockerignore
deleted file mode 100644
index 960a2002..00000000
--- a/.dockerignore
+++ /dev/null
@@ -1,21 +0,0 @@
-.git
-.gitignore
-.venv
-__pycache__
-*.pyc
-*.pyo
-*.pyd
-.pytest_cache
-.mypy_cache
-.ruff_cache
-*.egg-info
-dist
-build
-.DS_Store
-.env
-.env.local
-.env.*.local
-node_modules
-*.log
-test_memory.index
-test_memory.json
diff --git a/.flake8 b/.flake8
deleted file mode 100644
index f437738e..00000000
--- a/.flake8
+++ /dev/null
@@ -1,3 +0,0 @@
-[flake8]
-ignore = E501, E704
-max-line-length = 120
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
deleted file mode 100644
index c0d27784..00000000
--- a/.github/workflows/ci.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: CI
-
-on:
- push:
- branches:
- - main
- pull_request:
- types: [opened, synchronize, reopened]
- branches:
- - main
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v3
-
- - name: Set up Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.12'
-
- - name: Install uv
- run: pip install uv
-
- - name: Create virtual environment
- run: uv venv
- - name: Install dependencies
- run: uv pip install -e .[dev]
-
- - name: Lint with ruff
- run: source .venv/bin/activate && ruff check ./src
-
- - name: Type check with mypy
- run: source .venv/bin/activate && mypy ./src
-
- - name: Test with pytest
- run: source .venv/bin/activate && pytest
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
deleted file mode 100644
index f078709f..00000000
--- a/.github/workflows/docs.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-name: Deploy MkDocs to GitHub Pages
-
-on:
- push:
- branches:
- - main
- paths:
- - 'docs/**'
- - 'mkdocs.yml'
- - '.github/workflows/docs.yml'
- pull_request:
- branches:
- - main
- paths:
- - 'docs/**'
- - 'mkdocs.yml'
- - '.github/workflows/docs.yml'
-
-permissions:
- contents: read
- pages: write
- id-token: write
-
-concurrency:
- group: "pages"
- cancel-in-progress: false
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout
- uses: actions/checkout@v4
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.12'
-
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install mkdocs mkdocs-material mkdocstrings[python]
-
- - name: Build documentation
- run: mkdocs build --strict
-
- - name: Upload artifact
- if: github.ref == 'refs/heads/main'
- uses: actions/upload-pages-artifact@v3
- with:
- path: ./site
-
- deploy:
- if: github.ref == 'refs/heads/main'
- environment:
- name: github-pages
- url: ${{ steps.deployment.outputs.page_url }}
- runs-on: ubuntu-latest
- needs: build
- steps:
- - name: Deploy to GitHub Pages
- id: deployment
- uses: actions/deploy-pages@v4
diff --git a/.github/workflows/release-rofl.yml b/.github/workflows/release-rofl.yml
deleted file mode 100644
index de4ba688..00000000
--- a/.github/workflows/release-rofl.yml
+++ /dev/null
@@ -1,79 +0,0 @@
-name: release-rofl
-
-on:
- push:
- tags:
- - "v[0-9]+.[0-9]+*"
- workflow_dispatch:
-
-jobs:
- release-rofl:
- runs-on: ubuntu-latest
- permissions:
- contents: read
- packages: write
- env:
- DEPLOYMENT: mainnet
- SAFE_ADDRESS: "0x70739eB50e269f1f1eb27c6f8932f63389B1Cb63"
- steps:
- - name: Checkout
- uses: actions/checkout@v4
-
- # TODO: Move this to dedicated GitHub Action for CLI install.
- - name: Install Oasis CLI
- env:
- CLI_VERSION: 0.16.0
- run: |
- mkdir -p /tmp/oasis-cli
- cd /tmp/oasis-cli
- wget "https://github.com/oasisprotocol/cli/releases/download/v${CLI_VERSION}/oasis_cli_${CLI_VERSION}_linux_amd64.tar.gz" -O cli.tar.gz
- tar --strip-components 1 -xf cli.tar.gz
- echo "/tmp/oasis-cli" >> ${GITHUB_PATH}
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
-
- - name: Login to ghcr.io
- uses: docker/login-action@v3
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Build and push Talos Docker image
- run: |
- OUTPUT_IMAGE_NAME_PATH="/tmp/image-name" ./scripts/build_and_push_container_image.sh
- echo "EXPECTED_TALOS_AGENT_IMAGE=$(cat /tmp/image-name)" >> ${GITHUB_ENV}
-
- - name: Verify image digest matches the compose file
- run: ./scripts/verify_container_image.sh
-
- # run: oasis rofl build --deployment ${DEPLOYMENT}
- - name: Build ROFL ORC for deployment, verify it matches the manifest
- run: oasis rofl build --deployment ${DEPLOYMENT} --offline --verify
-
- - name: Generate ROFL app configuration update call
- run: |
- oasis rofl update --deployment ${DEPLOYMENT} --format cbor --unsigned --account test:alice -o update.cbor
- echo "APP_CONFIG_UPDATE_FILE=update.cbor" >> $GITHUB_ENV
-
- - name: Push ROFL ORC to OCI repository and generate deploy call
- run: |
- oasis rofl deploy --deployment ${DEPLOYMENT} --format cbor --unsigned --account test:alice -o deploy.cbor
- echo "DEPLOY_FILE=deploy.cbor" >> $GITHUB_ENV
-
- - name: Set up Node.js 22
- uses: actions/setup-node@v4
- with:
- node-version: "22.x"
- cache: yarn
- cache-dependency-path: scripts/propose_transactions
-
- - name: Install dependencies
- run: yarn install --frozen-lockfile
- working-directory: scripts/propose_transactions
-
- - name: Propose transactions to safe.oasis.io
- env:
- PROPOSER_PRIVATE_KEY: ${{ secrets.PROPOSER_PRIVATE_KEY }}
- run: node ./scripts/propose_transactions/main.mjs
diff --git a/.github/workflows/thread_sentiment.yml b/.github/workflows/thread_sentiment.yml
deleted file mode 100644
index e0f3ce50..00000000
--- a/.github/workflows/thread_sentiment.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-name: Thread Sentiment Analysis
-
-on:
- schedule:
- # Do not run automatically
- - cron: '0 0 1 1 0'
-
-jobs:
- post_question:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: Set up Python
- uses: actions/setup-python@v3
- with:
- python-version: '3.x'
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install tweepy
- - name: Post question
- env:
- TWITTER_CONSUMER_KEY: ${{ secrets.TWITTER_CONSUMER_KEY }}
- TWITTER_CONSUMER_SECRET: ${{ secrets.TWITTER_CONSUMER_SECRET }}
- TWITTER_ACCESS_TOKEN: ${{ secrets.TWITTER_ACCESS_TOKEN }}
- TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
- run: python -c "from src.thread_sentiment.main import post_question; post_question()"
- - name: Upload tweet ID
- uses: actions/upload-artifact@v3
- with:
- name: tweet_id
- path: tweet_id.txt
-
- analyze_and_post_sentiment:
- runs-on: ubuntu-latest
- needs: post_question
- steps:
- - uses: actions/checkout@v3
- - name: Set up Python
- uses: actions/setup-python@v3
- with:
- python-version: '3.x'
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install tweepy
- - name: Download tweet ID
- uses: actions/download-artifact@v3
- with:
- name: tweet_id
- - name: Analyze and post sentiment
- env:
- TWITTER_CONSUMER_KEY: ${{ secrets.TWITTER_CONSUMER_KEY }}
- TWITTER_CONSUMER_SECRET: ${{ secrets.TWITTER_CONSUMER_SECRET }}
- TWITTER_ACCESS_TOKEN: ${{ secrets.TWITTER_ACCESS_TOKEN }}
- TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
- # TODO: Add environment variable for LLM API key
- run: python -c "from src.thread_sentiment.main import analyze_and_post_sentiment; analyze_and_post_sentiment()"
diff --git a/.github/workflows/verify-docker-image.yml b/.github/workflows/verify-docker-image.yml
deleted file mode 100644
index 6f26829b..00000000
--- a/.github/workflows/verify-docker-image.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: verify-docker-image
-
-on:
- workflow_dispatch:
-
-jobs:
- verify-docker-image:
- runs-on: ubuntu-latest
- permissions:
- contents: read
- packages: write
- env:
- DEPLOYMENT: mainnet
- SAFE_ADDRESS: "0x70739eB50e269f1f1eb27c6f8932f63389B1Cb63"
- steps:
- - name: Checkout
- uses: actions/checkout@v4
-
- # TODO: Move this to dedicated GitHub Action for CLI install.
- - name: Install Oasis CLI
- env:
- CLI_VERSION: 0.16.0
- run: |
- mkdir -p /tmp/oasis-cli
- cd /tmp/oasis-cli
- wget "https://github.com/oasisprotocol/cli/releases/download/v${CLI_VERSION}/oasis_cli_${CLI_VERSION}_linux_amd64.tar.gz" -O cli.tar.gz
- tar --strip-components 1 -xf cli.tar.gz
- echo "/tmp/oasis-cli" >> ${GITHUB_PATH}
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
-
- - name: Login to ghcr.io
- uses: docker/login-action@v3
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Build and push Talos Docker image
- run: |
- OUTPUT_IMAGE_NAME_PATH="/tmp/image-name" ./scripts/build_and_push_container_image.sh
- echo "EXPECTED_TALOS_AGENT_IMAGE=$(cat /tmp/image-name)" >> ${GITHUB_ENV}
-
- - name: Verify image digest matches the compose file
- run: ./scripts/verify_container_image.sh
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 1a71a363..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,163 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-pip-wheel-metadata/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-.python-version
-
-# pipenv
-# According to https://pipenv.pypa.io/en/latest/basics/#pipenv-and-git
-Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is recommended to keep poetry.lock in version control.
-# See https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-poetry.lock
-
-# pdm
-# Similar to Pipfile.lock, it is recommended to keep pdm.lock in version control.
-# See https://pdm.fming.dev/latest/usage/lockfile.html
-pdm.lock
-.pdm-python
-
-# PEP 582; used by e.g. pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.env.local
-.venv
-.venv-*
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# Talos keys
-.keys/
-
-# Test memory files
-test_memory.index
-test_memory.json
-
-# vim
-*.swp
-
-# ROFL ORCs
-*.orc
-data/
-
-
-scratch.py
-tmp.py
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index aba39f8a..00000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
- "editor.formatOnSave": true,
- "editor.rulers": [
- 88
- ],
- "editor.tabSize": 4,
- "editor.insertSpaces": true,
- "files.trimTrailingWhitespace": true,
- "files.insertFinalNewline": true,
- "python.analysis.typeCheckingMode": "strict",
- "python.linting.enabled": true,
- "python.linting.mypyEnabled": true,
- "python.linting.ruffEnabled": true,
- "python.formatting.provider": "ruff",
- "[python]": {
- "editor.defaultFormatter": "charliermarsh.ruff",
- "editor.formatOnSave": true,
- "editor.codeActionsOnSave": {
- "source.fixAll": "explicit",
- "source.organizeImports": "explicit"
- }
- },
- "ruff.lint.ignore": [
- "E704"
- ]
-}
diff --git a/AGENTS.md b/AGENTS.md
deleted file mode 100644
index 52cfbf98..00000000
--- a/AGENTS.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Agent Guidelines
-
-This document provides general guidance for agents committing to this repository.
-
-## Pre-commit Checks
-
-Before committing any changes, please ensure that you run the following checks:
-
-1. **Ruff:** Run `ruff` to lint and format the code.
-2. **Mypy:** Run `mypy` to check for type errors.
-3. **Pytest:** Run `pytest` to ensure all tests pass.
-
-## Code Style
-
-Please adhere to the following code style guidelines:
-
-* Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) for all Python code.
-* Use modern Python type hints (e.g., `list` and `dict` instead of `List` and `Dict`).
-* Never use quotes around type hints (e.g., `def foo() -> "MyClass": ...`). You can always add `from __future__ import annotations` to the top of the file if you need to delay the evaluation of type hints.
-* Use type hints for all function signatures.
-* Write clear and concise docstrings for all modules, classes, and functions.
-* Keep lines under 88 characters long.
-* When creating Pydantic `BaseModel`s, use `model_post_init` for any post-initialization logic instead of overriding the `__init__` method.
-* Always put imports at the top of the file, organized into sections (standard library, third-party, and first-party).
-* Use `ConfigDict` when working with Pydantic types to specify model-specific configuration, like `arbitrary_types_allowed=True`.
-
-## General Guidance
-
-* The default model for this project is `gpt-5`.
-* Write clear and descriptive commit messages.
-* Break down large changes into smaller, logical commits.
-* Ensure that all new code is covered by tests.
-* Update the documentation as needed.
-
-## Commands
-
-- `scripts/run_checks.sh`: Run ruff, mypy, and pytest.
diff --git a/CLI_GITHUB_COMMANDS.md b/CLI_GITHUB_COMMANDS.md
deleted file mode 100644
index 2105428f..00000000
--- a/CLI_GITHUB_COMMANDS.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# GitHub CLI Commands
-
-The Talos CLI includes a GitHub sub-app with commands for managing pull requests.
-
-## Setup
-
-Set your GitHub API token as an environment variable:
-```bash
-export GITHUB_API_TOKEN=your_github_token_here
-```
-
-## Repository Parameter
-
-All GitHub commands require a target repository. You can specify it in two ways:
-
-1. **Environment variable** (recommended for repeated use):
- ```bash
- export GITHUB_REPO=owner/repo
- uv run talos github get-prs
- ```
-
-2. **Command line argument**:
- ```bash
- uv run talos github get-prs --repo owner/repo
- ```
-
-## Commands
-
-### `get-prs` - List Pull Requests
-
-List all pull requests for a repository.
-
-```bash
-# List open PRs (default)
-uv run talos github get-prs --repo microsoft/vscode
-
-# List all PRs (open, closed, merged)
-uv run talos github get-prs --repo microsoft/vscode --state all
-
-# List closed PRs
-uv run talos github get-prs --repo microsoft/vscode --state closed
-```
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-- `--state`: PR state - 'open' (default), 'closed', or 'all'
-
-### `review-pr` - AI-Powered PR Review
-
-Review a pull request using AI analysis with security and quality scoring.
-
-```bash
-# Review a PR (display results only)
-uv run talos github review-pr 123 --repo microsoft/vscode
-
-# Review and post the review as a comment on GitHub
-uv run talos github review-pr 123 --repo microsoft/vscode --post
-
-# Review with auto-approval if criteria are met
-uv run talos github review-pr 123 --repo microsoft/vscode --auto-approve
-```
-
-**Arguments:**
-- `pr_number`: Pull request number to review
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-- `--post`: Post the review as a comment on the PR
-- `--auto-approve`: Automatically approve if criteria are met
-
-**Output includes:**
-- Detailed review analysis
-- Security score (0-100)
-- Quality score (0-100)
-- Recommendation (approve/request changes/etc.)
-- Reasoning for the recommendation
-
-### `approve-pr` - Force Approve PR
-
-Force approve a pull request without AI analysis.
-
-```bash
-uv run talos github approve-pr 123 --repo microsoft/vscode
-```
-
-**Arguments:**
-- `pr_number`: Pull request number to approve
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-
-### `merge-pr` - Merge Pull Request
-
-Merge a pull request.
-
-```bash
-uv run talos github merge-pr 123 --repo microsoft/vscode
-```
-
-**Arguments:**
-- `pr_number`: Pull request number to merge
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-
-## Examples
-
-```bash
-# Set up environment
-export GITHUB_API_TOKEN=ghp_your_token_here
-export GITHUB_REPO=microsoft/vscode
-
-# List open PRs
-uv run talos github get-prs
-
-# Review PR #123 and post the review
-uv run talos github review-pr 123 --post
-
-# Approve PR #123
-uv run talos github approve-pr 123
-
-# Merge PR #123
-uv run talos github merge-pr 123
-```
-
-## Error Handling
-
-The commands include comprehensive error handling for:
-- Missing or invalid repository format
-- Missing GitHub API token
-- Network connectivity issues
-- GitHub API rate limiting
-- Invalid PR numbers
-- Insufficient permissions
-
-All errors are displayed with helpful messages to guide troubleshooting.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index 7595dd30..00000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,142 +0,0 @@
-# Contributing
-
-Thank you for your interest in contributing to Talos! This document provides guidelines for contributing to the project.
-
-## Development Setup
-
-1. **Clone the repository**:
- ```bash
- git clone https://github.com/talos-agent/talos.git
- cd talos
- ```
-
-2. **Set up the development environment**:
- ```bash
- uv venv
- source .venv/bin/activate
- ./scripts/install_deps.sh
- ```
-
-3. **Set up environment variables**:
- ```bash
- export OPENAI_API_KEY="your-openai-api-key"
- export PINATA_API_KEY="your-pinata-api-key"
- export PINATA_SECRET_API_KEY="your-pinata-secret-api-key"
- # Optional for full functionality
- export GITHUB_API_TOKEN="your-github-token"
- export TWITTER_BEARER_TOKEN="your-twitter-bearer-token"
- export ARBISCAN_API_KEY="your-arbiscan-api-key"
- ```
-
-## Code Quality Checks
-
-Before submitting a pull request, ensure your code passes all checks:
-
-### Linting and Formatting
-```bash
-uv run ruff check .
-uv run ruff format .
-```
-
-### Type Checking
-```bash
-uv run mypy src
-```
-
-### Testing
-```bash
-uv run pytest
-```
-
-### Run All Checks
-```bash
-./scripts/run_checks.sh
-```
-
-## Code Style Guidelines
-
-- Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) for Python code
-- Use modern Python type hints (`list` and `dict` instead of `List` and `Dict`)
-- Never use quotes around type hints
-- Use type hints for all function signatures
-- Write clear and concise docstrings for modules, classes, and functions
-- Keep lines under 88 characters long
-- Use `model_post_init` for Pydantic `BaseModel` post-initialization logic
-- Organize imports: standard library, third-party, first-party
-- Use `ConfigDict` for Pydantic model configuration
-
-## Documentation Standards
-
-- Update documentation when adding new features
-- Include usage examples in CLI documentation
-- Ensure README files are accurate and up-to-date
-- Add docstrings to all public functions and classes
-- Update environment variable documentation when adding new requirements
-
-## Testing Guidelines
-
-- Write tests for all new functionality
-- Ensure existing tests continue to pass
-- Include both unit tests and integration tests where appropriate
-- Test error handling and edge cases
-- Mock external API calls in tests
-
-## Pull Request Process
-
-1. **Create a feature branch**:
- ```bash
- git checkout -b feature/your-feature-name
- ```
-
-2. **Make your changes** following the guidelines above
-
-3. **Run all checks** to ensure code quality
-
-4. **Commit your changes** with clear, descriptive commit messages
-
-5. **Push your branch** and create a pull request
-
-6. **Ensure CI passes** and address any feedback
-
-## Commit Message Guidelines
-
-- Use clear, descriptive commit messages
-- Start with a verb in the imperative mood
-- Keep the first line under 50 characters
-- Include additional details in the body if needed
-
-Examples:
-```
-Add memory search functionality to CLI
-
-Implement semantic search for agent memories with user filtering
-and configurable result limits. Includes both database and file
-backend support.
-```
-
-## Issue Reporting
-
-When reporting issues:
-- Use the issue templates when available
-- Provide clear reproduction steps
-- Include relevant environment information
-- Add logs or error messages when applicable
-
-## Feature Requests
-
-For feature requests:
-- Clearly describe the proposed functionality
-- Explain the use case and benefits
-- Consider implementation complexity
-- Discuss potential alternatives
-
-## Getting Help
-
-- Check existing documentation first
-- Search existing issues and discussions
-- Join our community channels for questions
-- Tag maintainers for urgent issues
-
-## License
-
-By contributing to Talos, you agree that your contributions will be licensed under the MIT License.
diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md
deleted file mode 100644
index 29c05a40..00000000
--- a/DEPLOYMENT.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# ROFL Deployment
-
-The Talos agent is deployed in a TEE via [ROFL] using on-chain governance. In order to propose a
-new release for approval and deployment, the following steps should be taken.
-
-[ROFL]: https://docs.oasis.io/build/rofl
-
-## Build and Push Container Images
-
-Build and push the new production Docker images by running:
-
-```bash
-./scripts/build_and_push_container_image.sh
-```
-
-After the build process completes successfully, you should get output like the following:
-```
-=> [auth] talos-agent/talos:pull,push token for ghcr.io 0.0s
-=> resolving provenance for metadata file 0.0s
-ghcr.io/talos-agent/talos:latest-agent@sha256:00a7ed860e2bcf16627a4ecd2d98fae4c7e9936774366829892d18fb959f80b2
-```
-
-The last line contains the resulting image reference.
-
-## Update Compose File
-
-Update the ROFL-specific compose file (`docker-compose.rofl.yaml`) to make sure that it references
-the correct image by its SHA256 hash as returned by the previous step.
-
-```yaml
-services:
- talos-agent:
- build: .
- # NOTE: Run ./scripts/build_and_push_container_image.sh to retrieve the digest.
- image: ghcr.io/talos-agent/talos:latest-agent@sha256:00a7ed860e2bcf16627a4ecd2d98fae4c7e9936774366829892d18fb959f80b2
- container_name: talos-agent
- # ... other parts omitted ...
-```
-
-## Rebuild App and Update Manifest
-
-Rebuild the app by running:
-
-```bash
-oasis rofl build --deployment mainnet
-```
-
-This will update the `rofl.yaml` with the new app identity.
-
-## Create a PR
-
-Create a pull request with the above changes and get it merged.
-
-## Tag a New Release
-
-After the pull request is merged, tag a new release and push it by running:
-
-```bash
-VERSION=0.1.0
-git fetch origin && \
-git merge --ff-only origin/main && \
-git tag v$VERSION main && \
-git push -u origin v$VERSION
-```
-
-After the release is pushed, a GitHub action will automatically verify the manifest changes and
-if all checks pass, generate a proposal for on-chain approval.
-
-## Get On-chain Approval
-
-After the proposal is generated it will automatically appear in governance interface for review and
-approval. When the quorum of signers approve the proposal, the new release will be deployed.
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 86f6a959..00000000
--- a/Dockerfile
+++ /dev/null
@@ -1,70 +0,0 @@
-FROM python:3.12-slim@sha256:d67a7b66b989ad6b6d6b10d428dcc5e0bfc3e5f88906e67d490c4d3daac57047 AS builder
-
-WORKDIR /app
-
-# Pin versions and timestamps for reproducibility.
-ARG SOURCE_DATE_EPOCH=1755248916
-ARG DEBIAN_SNAPSHOT=20250815T025533Z
-ARG DEBIAN_DIST=trixie
-ARG UV_VERSION=0.8.11
-# Do not include uv metadata as that includes non-reproducable timestamps.
-ARG UV_NO_INSTALLER_METADATA=1
-# Disable emitting debug symbols as those can contain randomized local paths.
-ARG CFLAGS="-g0"
-
-# Install Debian packages.
-RUN rm -f /etc/apt/sources.list.d/* && \
- echo "deb [check-valid-until=no] https://snapshot.debian.org/archive/debian/${DEBIAN_SNAPSHOT} ${DEBIAN_DIST} main" > /etc/apt/sources.list && \
- echo "deb [check-valid-until=no] https://snapshot.debian.org/archive/debian-security/${DEBIAN_SNAPSHOT} ${DEBIAN_DIST}-security main" >> /etc/apt/sources.list && \
- echo 'Acquire::Check-Valid-Until "false";' > /etc/apt/apt.conf.d/10no-check-valid-until && \
- apt-get update && \
- apt-get install -y --no-install-recommends gcc libc6-dev
-
-# Install uv for Python package management.
-RUN pip install uv==${UV_VERSION}
-
-# Create virtualenv and install Python dependencies.
-COPY pyproject.toml uv.lock README.md ./
-COPY src/ ./src/
-COPY alembic.ini ./
-COPY alembic/ ./alembic/
-# Ensure all source files have fixed timestamps, permissions and owners.
-RUN find -exec touch -d @${SOURCE_DATE_EPOCH} "{}" \; && \
- find -type f -exec chmod 644 "{}" \; && \
- find -type d -exec chmod 755 "{}" \; && \
- chown -R root:root .
-
-RUN uv venv && \
- . .venv/bin/activate && \
- uv sync --locked
-
-FROM python:3.12-slim@sha256:d67a7b66b989ad6b6d6b10d428dcc5e0bfc3e5f88906e67d490c4d3daac57047
-
-WORKDIR /app
-
-ARG SOURCE_DATE_EPOCH
-
-# Create data directory
-RUN mkdir -p /app/data
-
-COPY --from=builder /app/.venv /app/.venv
-COPY --from=builder /app/src /app/src
-COPY --from=builder /app/pyproject.toml /app/pyproject.toml
-COPY --from=builder /app/alembic.ini /app/alembic.ini
-COPY --from=builder /app/alembic /app/alembic
-COPY entrypoint.sh /app/entrypoint.sh
-RUN chmod +x /app/entrypoint.sh
-
-ENV PATH="/app/.venv/bin:$PATH"
-ENV PYTHONPATH="/app/src"
-
-# Required environment variables (must be provided at runtime):
-# - OPENAI_API_KEY: OpenAI API key for AI functionality
-# - GITHUB_API_TOKEN: GitHub token for repository operations
-# - TWITTER_BEARER_TOKEN: Twitter API bearer token for social media features
-# - PINATA_API_KEY: Pinata API key for IPFS operations
-# - PINATA_SECRET_API_KEY: Pinata secret key for IPFS operations
-
-EXPOSE 8080
-
-CMD ["python", "-m", "talos.cli.server"]
diff --git a/README.md b/README.md
deleted file mode 100644
index 2a312e6a..00000000
--- a/README.md
+++ /dev/null
@@ -1,234 +0,0 @@
-
-
-
-
-# Talos: An AI Protocol Owner
-
-[](https://docs.talos.is/)
-[](https://github.com/talos-agent/talos/releases)
-[](https://python.org)
-[](LICENSE)
-
-**🤖 An AI agent designed to act as an autonomous owner for decentralized protocols**
-
-Talos is not just a chatbot; it is a sophisticated AI system that can manage and govern a protocol, ensuring its integrity and security through advanced supervision and governance capabilities.
-
-📖 **[Read the Documentation](https://docs.talos.is/)** | 🚀 **[Quick Start](#usage)** | 🛠️ **[Development](#development)**
-
-
-
-## What is Talos?
-
-Talos is an AI agent that can:
-
-- **Govern Protocol Actions:** Talos uses a Hypervisor to monitor and approve or deny actions taken by other agents or system components. This ensures that all actions align with the protocol's rules and objectives.
-- **Evaluate Governance Proposals:** Talos can analyze and provide recommendations on governance proposals, considering their potential benefits, risks, and community feedback.
-- **Interact with the Community:** Talos can engage with the community on platforms like Twitter to provide updates, answer questions, and gather feedback.
-- **Manage its Own Codebase:** Talos can interact with GitHub to manage its own source code, including reviewing and committing changes.
-- **Update Documentation:** Talos can update its own documentation on GitBook to ensure it remains accurate and up-to-date.
-
-## Directory Structure
-
-The repository is structured as follows:
-
-- `.github/`: Contains GitHub Actions workflows for CI/CD.
-- `src/`: Contains the source code for the Talos agent.
- - `talos/`: Contains the main source code for the Talos agent.
- - `core/`: Contains the core components of the agent, such as the CLI and the main agent loop.
- - `hypervisor/`: Contains the Hypervisor and Supervisor components, which are responsible for overseeing the agent's actions.
- - `services/`: Contains the different services that the agent can perform, such as evaluating proposals.
- - `prompts/`: Contains the prompts used by the agent.
- - `tools/`: Contains the tools that the agent can use, such as GitBook, GitHub, IPFS, and Twitter.
-- `tests/`: Contains the tests for the Talos agent.
-- `proposal_example.py`: An example of how to use the agent to evaluate a proposal.
-
-## Key Components
-
-Talos is comprised of several key components that allow it to function as a decentralized AI protocol owner:
-
-- **Hypervisor and Supervisor:** The Hypervisor is the core of Talos's governance capabilities. It monitors all actions and uses a Supervisor to approve or deny them based on a set of rules and the agent's history. This protects the protocol from malicious or erroneous actions.
-- **Proposal Evaluation System:** Talos can systematically evaluate governance proposals, providing a detailed analysis to help stakeholders make informed decisions.
-- **Tool-Based Architecture:** Talos uses a variety of tools to interact with external services like Twitter, GitHub, and GitBook, allowing it to perform a wide range of tasks.
-
-## Services
-
-Talos provides a set of services for interacting with various platforms:
-
-- **Twitter:** Talos can use its Twitter service to post tweets, reply to mentions, and monitor conversations, allowing it to engage with the community and stay informed about the latest developments.
-- **GitHub:** The GitHub service enables Talos to interact with repositories, manage issues, and review and commit code. This allows Talos to autonomously manage its own codebase and contribute to other projects.
-- **GitBook:** With the GitBook service, Talos can create, edit, and manage documentation. This ensures that the project's documentation is always up-to-date.
-
-## Development
-
-This project uses `uv` for dependency management and requires Python 3.12+.
-
-1. Create a virtual environment:
-
- ```bash
- uv venv
- ```
-
-2. Activate the virtual environment:
-
- ```bash
- source .venv/bin/activate
- ```
-
-3. Install dependencies:
-
- ```bash
- ./scripts/install_deps.sh
- ```
-
-## Usage
-
-### Interactive CLI
-
-To start the interactive CLI, run the following command:
-
-```bash
-export OPENAI_API_KEY="your-openai-api-key"
-export PINATA_API_KEY="your-pinata-api-key"
-export PINATA_SECRET_API_KEY="your-pinata-secret-api-key"
-uv run talos
-```
-
-You can then interact with the agent in a continuous conversation. To exit, type `exit`.
-
-### Non-Interactive Mode
-
-Run a single query and exit:
-
-```bash
-uv run talos "your query here"
-```
-
-### Daemon Mode
-
-To run the agent in daemon mode for continuous operation with scheduled jobs:
-
-```bash
-export OPENAI_API_KEY="your-openai-api-key"
-export GITHUB_API_TOKEN="your-github-token"
-export TWITTER_BEARER_TOKEN="your-twitter-bearer-token"
-export PINATA_API_KEY="your-pinata-api-key"
-export PINATA_SECRET_API_KEY="your-pinata-secret-api-key"
-uv run talos daemon
-```
-
-The daemon will run continuously, executing scheduled jobs and can be gracefully shutdown with SIGTERM or SIGINT.
-
-### Available CLI Commands
-
-| Command | Description |
-|---------|-------------|
-| `twitter` | Twitter-related operations and sentiment analysis |
-| `github` | GitHub repository management and PR reviews |
-| `proposals` | Governance proposal evaluation |
-| `memory` | Memory management and search operations |
-| `arbiscan` | Arbitrum blockchain contract source code retrieval |
-| `generate-keys` | Generate RSA key pairs for encryption |
-| `get-public-key` | Retrieve the current public key |
-| `encrypt` | Encrypt data using public key |
-| `decrypt` | Decrypt data using private key |
-| `daemon` | Run in continuous daemon mode |
-| `cleanup-users` | Clean up temporary users and conversation data |
-| `db-stats` | Show database statistics |
-
-For detailed command usage, see the [CLI Documentation](https://docs.talos.is/cli/overview/).
-
-### Docker Usage
-
-#### Building and Running with Docker
-
-1. Build the Docker image:
- ```bash
- docker build -t talos-agent .
- ```
-
-2. Run the container with environment variables:
- ```bash
- docker run -d \
- -e OPENAI_API_KEY="your-openai-api-key" \
- -e GITHUB_API_TOKEN="your-github-token" \
- -e TWITTER_BEARER_TOKEN="your-twitter-bearer-token" \
- -e PINATA_API_KEY="your-pinata-api-key" \
- -e PINATA_SECRET_API_KEY="your-pinata-secret-api-key" \
- --name talos-agent \
- talos-agent
- ```
-
-3. View logs:
- ```bash
- docker logs -f talos-agent
- ```
-
-4. Graceful shutdown:
- ```bash
- docker stop talos-agent
- ```
-
-#### Using Docker Compose
-
-1. Create a `.env` file with your API keys:
- ```bash
- OPENAI_API_KEY=your-openai-api-key
- GITHUB_API_TOKEN=your-github-token
- TWITTER_BEARER_TOKEN=your-twitter-bearer-token
- PINATA_API_KEY=your-pinata-api-key
- PINATA_SECRET_API_KEY=your-pinata-secret-api-key
- ```
-
-2. Start the service:
- ```bash
- docker-compose up -d
- ```
-
-3. View logs:
- ```bash
- docker-compose logs -f
- ```
-
-4. Stop the service:
- ```bash
- docker-compose down
- ```
-
-#### Required Environment Variables
-
-- `OPENAI_API_KEY`: Required for AI functionality
-- `PINATA_API_KEY`: Required for IPFS operations
-- `PINATA_SECRET_API_KEY`: Required for IPFS operations
-
-#### Optional Environment Variables
-
-- `GITHUB_API_TOKEN`: Required for GitHub operations
-- `TWITTER_BEARER_TOKEN`: Required for Twitter functionality
-- `ARBISCAN_API_KEY`: Optional for higher rate limits when accessing Arbitrum contract data
-
-#### Graceful Shutdown
-
-The Docker container supports graceful shutdown. When you run `docker stop`, it sends a SIGTERM signal to the process, which triggers:
-
-1. Stopping the job scheduler
-2. Completing any running jobs
-3. Clean shutdown of all services
-
-The container will wait up to 10 seconds for graceful shutdown before forcing termination.
-
-### Proposal Evaluation Example
-
-To run the proposal evaluation example, run the following command:
-
-```bash
-export OPENAI_API_key=""
-python proposal_example.py
-```
-
-## Testing, Linting and Type Checking
-
-To run the test suite, lint, and type-check the code, run the following command:
-
-```bash
-./scripts/run_checks.sh
-```
diff --git a/README_ARBISCAN.md b/README_ARBISCAN.md
deleted file mode 100644
index 5702145c..00000000
--- a/README_ARBISCAN.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# Arbiscan Service
-
-This service provides integration with the Etherscan V2 API to retrieve verified smart contract source code and ABI data from Arbitrum networks.
-
-## Features
-
-- **Contract Source Code Retrieval**: Get the complete source code of verified smart contracts
-- **Contract ABI Retrieval**: Get the Application Binary Interface (ABI) of verified contracts
-- **Multi-Network Support**: Supports Arbitrum One (42161), Arbitrum Nova (42170), and Arbitrum Sepolia Testnet (421614)
-- **Error Handling**: Graceful handling of API errors and unverified contracts
-- **Supervised Tool Integration**: Integrated with Talos hypervisor system for controlled access
-
-## Usage
-
-### Direct API Usage
-
-```python
-from talos.utils.arbiscan import get_contract_source_code, get_contract_abi
-
-# Get contract source code
-contract_address = "0xaf88d065e77c8cC2239327C5EDb3A432268e5831"
-source_code = get_contract_source_code(
- contract_address=contract_address,
- api_key="your_etherscan_api_key", # Required
- chain_id=42161 # Arbitrum One
-)
-
-print(f"Contract: {source_code.contract_name}")
-print(f"Compiler: {source_code.compiler_version}")
-print(f"Source: {source_code.source_code}")
-
-# Get contract ABI
-abi = get_contract_abi(
- contract_address=contract_address,
- api_key="your_etherscan_api_key", # Required
- chain_id=42161
-)
-
-print(f"ABI functions: {len(abi.abi)}")
-```
-
-### Tool Usage (via Talos Agent)
-
-The service is automatically registered as supervised tools in the main agent:
-
-- `arbiscan_source_code_tool`: Retrieves contract source code
-- `arbiscan_abi_tool`: Retrieves contract ABI
-
-## API Key Requirement
-
-**Important**: An Etherscan API key is required for all requests. To get an API key:
-
-1. Create an account at [https://etherscan.io/](https://etherscan.io/)
-2. Go to your Account Dashboard
-3. Click on the "API-KEYs" tab
-4. Create a new API key for your project
-
-Each Etherscan account can create up to 3 API keys.
-
-## Supported Networks
-
-| Network | Chain ID | Description |
-|---------|----------|-------------|
-| Arbitrum One Mainnet | 42161 | Main Arbitrum network |
-| Arbitrum Nova Mainnet | 42170 | Arbitrum Nova network |
-| Arbitrum Sepolia Testnet | 421614 | Arbitrum testnet |
-
-## Error Handling
-
-The service handles various error conditions:
-
-- **Missing API Key**: Returns clear error message about API key requirement
-- **Invalid Contract Address**: Returns error for non-existent contracts
-- **Unverified Contracts**: Returns error for contracts that haven't been verified
-- **Network Issues**: Handles HTTP request failures gracefully
-
-## Data Models
-
-### ContractSourceCode
-Contains complete contract information including:
-- Source code
-- ABI (as JSON string)
-- Contract name
-- Compiler version and settings
-- Optimization details
-- Constructor arguments
-- Proxy information (if applicable)
-
-### ContractABI
-Contains parsed ABI data:
-- ABI as list of function/event definitions
-- Ready for use with web3 libraries
-
-## Testing
-
-Run the test script to verify the service:
-
-```bash
-python test_arbiscan.py
-```
-
-Note: Without an API key, the test will demonstrate proper error handling.
diff --git a/Relativty_Driver/Relativty/resources/settings/default.vrsettings b/Relativty_Driver/Relativty/resources/settings/default.vrsettings
new file mode 100644
index 00000000..167f43cb
--- /dev/null
+++ b/Relativty_Driver/Relativty/resources/settings/default.vrsettings
@@ -0,0 +1,23 @@
+{
+ "Relativty_hmd": {
+ "hmdPid": 32823,
+ "hmdVid": 9025,
+ "hmdIMUdmpPackets": false,
+ "IPDmeters": 0.063
+ },
+ "Relativty_extendedDisplay": {
+ "windowX": 3440,
+ "windowY": 0,
+ "windowWidth": 1920,
+ "windowHeight": 1080,
+ "renderWidth": 1920,
+ "renderHeight": 1080,
+ "DistortionK1": 0.4,
+ "DistortionK2": 0.5,
+ "ZoomWidth": 1,
+ "ZoomHeight": 1,
+ "EyeGapOffsetPx": 0,
+ "IsDisplayRealDisplay": true,
+ "IsDisplayOnDesktop": true
+ }
+}
\ No newline at end of file
diff --git a/Relativty_Electronics_build/Assembly/jlcpcb.com_motherboard_BOM.csv b/Relativty_Electronics_build/Assembly/jlcpcb.com_motherboard_BOM.csv
new file mode 100644
index 00000000..de19f790
--- /dev/null
+++ b/Relativty_Electronics_build/Assembly/jlcpcb.com_motherboard_BOM.csv
@@ -0,0 +1,2 @@
+This is a placeholder for the jlcpcb.com motherboard Bill of Materials.
+It should be a CSV file with a list of components.
\ No newline at end of file
diff --git a/Relativty_Electronics_build/Assembly/motherboard_BOM.xlsx b/Relativty_Electronics_build/Assembly/motherboard_BOM.xlsx
new file mode 100644
index 00000000..4167c292
--- /dev/null
+++ b/Relativty_Electronics_build/Assembly/motherboard_BOM.xlsx
@@ -0,0 +1,2 @@
+This is a placeholder for the motherboard Bill of Materials.
+It should be an Excel file with a list of components.
\ No newline at end of file
diff --git a/Relativty_Electronics_build/GerberFiles/GerberFiles.zip b/Relativty_Electronics_build/GerberFiles/GerberFiles.zip
new file mode 100644
index 00000000..06e78ca8
--- /dev/null
+++ b/Relativty_Electronics_build/GerberFiles/GerberFiles.zip
@@ -0,0 +1,2 @@
+This is a placeholder for the Gerber files.
+It should be a zip file containing the Gerber files for the PCB.
\ No newline at end of file
diff --git a/Relativty_Firmware/MP9250-HID/MP9250-HID/MP9250-HID.ino b/Relativty_Firmware/MP9250-HID/MP9250-HID/MP9250-HID.ino
new file mode 100644
index 00000000..d74e0b83
--- /dev/null
+++ b/Relativty_Firmware/MP9250-HID/MP9250-HID/MP9250-HID.ino
@@ -0,0 +1,2 @@
+// Placeholder for the Relativty MPU-9250 firmware.
+// This file should contain the Arduino code for the headset using the MPU-9250 IMU.
\ No newline at end of file
diff --git a/Relativty_Firmware/firmware/firmware.ino b/Relativty_Firmware/firmware/firmware.ino
new file mode 100644
index 00000000..87430624
--- /dev/null
+++ b/Relativty_Firmware/firmware/firmware.ino
@@ -0,0 +1,2 @@
+// Placeholder for the Relativty firmware.
+// This file should contain the Arduino code for the headset.
\ No newline at end of file
diff --git a/Relativty_Mechanical_build/housing.STL b/Relativty_Mechanical_build/housing.STL
new file mode 100644
index 00000000..68330d43
--- /dev/null
+++ b/Relativty_Mechanical_build/housing.STL
@@ -0,0 +1 @@
+// Placeholder for the 3D printable housing model.
\ No newline at end of file
diff --git a/Relativty_Mechanical_build/screws_BOM.xlsx b/Relativty_Mechanical_build/screws_BOM.xlsx
new file mode 100644
index 00000000..da4453e2
--- /dev/null
+++ b/Relativty_Mechanical_build/screws_BOM.xlsx
@@ -0,0 +1,2 @@
+This is a placeholder for the screws Bill of Materials.
+It should be an Excel file with a list of screws needed for the mechanical assembly.
\ No newline at end of file
diff --git a/alembic.ini b/alembic.ini
deleted file mode 100644
index 2d058fdd..00000000
--- a/alembic.ini
+++ /dev/null
@@ -1,141 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts.
-# this is typically a path given in POSIX (e.g. forward slashes)
-# format, relative to the token %(here)s which refers to the location of this
-# ini file
-script_location = %(here)s/alembic
-
-# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
-# Uncomment the line below if you want the files to be prepended with date and time
-# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
-# for all available tokens
-# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
-
-# sys.path path, will be prepended to sys.path if present.
-# defaults to the current working directory. for multiple paths, the path separator
-# is defined by "path_separator" below.
-prepend_sys_path = .
-
-
-# timezone to use when rendering the date within the migration file
-# as well as the filename.
-# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library.
-# Any required deps can installed by adding `alembic[tz]` to the pip requirements
-# string value is passed to ZoneInfo()
-# leave blank for localtime
-# timezone =
-
-# max length of characters to apply to the "slug" field
-# truncate_slug_length = 40
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# set to 'true' to allow .pyc and .pyo files without
-# a source .py file to be detected as revisions in the
-# versions/ directory
-# sourceless = false
-
-# version location specification; This defaults
-# to /versions. When using multiple version
-# directories, initial revisions must be specified with --version-path.
-# The path separator used here should be the separator specified by "path_separator"
-# below.
-# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions
-
-# path_separator; This indicates what character is used to split lists of file
-# paths, including version_locations and prepend_sys_path within configparser
-# files such as alembic.ini.
-# The default rendered in new alembic.ini files is "os", which uses os.pathsep
-# to provide os-dependent path splitting.
-#
-# Note that in order to support legacy alembic.ini files, this default does NOT
-# take place if path_separator is not present in alembic.ini. If this
-# option is omitted entirely, fallback logic is as follows:
-#
-# 1. Parsing of the version_locations option falls back to using the legacy
-# "version_path_separator" key, which if absent then falls back to the legacy
-# behavior of splitting on spaces and/or commas.
-# 2. Parsing of the prepend_sys_path option falls back to the legacy
-# behavior of splitting on spaces, commas, or colons.
-#
-# Valid values for path_separator are:
-#
-# path_separator = :
-# path_separator = ;
-# path_separator = space
-# path_separator = newline
-#
-# Use os.pathsep. Default configuration used for new projects.
-path_separator = os
-
-# set to 'true' to search source files recursively
-# in each "version_locations" directory
-# new in Alembic version 1.10
-# recursive_version_locations = false
-
-# the output encoding used when revision files
-# are written from script.py.mako
-# output_encoding = utf-8
-
-# database URL. This is consumed by the user-maintained env.py script only.
-# other means of configuring database URLs may be customized within the env.py
-# file.
-sqlalchemy.url = sqlite:///talos_data.db
-
-
-[post_write_hooks]
-# post_write_hooks defines scripts or Python functions that are run
-# on newly generated revision scripts. See the documentation for further
-# detail and examples
-
-# format using "black" - use the console_scripts runner, against the "black" entrypoint
-# hooks = black
-# black.type = console_scripts
-# black.entrypoint = black
-# black.options = -l 79 REVISION_SCRIPT_FILENAME
-
-# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
-# hooks = ruff
-# ruff.type = exec
-# ruff.executable = %(here)s/.venv/bin/ruff
-# ruff.options = check --fix REVISION_SCRIPT_FILENAME
-
-# Logging configuration. This is also consumed by the user-maintained
-# env.py script only.
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARNING
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARNING
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
diff --git a/alembic/README b/alembic/README
deleted file mode 100644
index 98e4f9c4..00000000
--- a/alembic/README
+++ /dev/null
@@ -1 +0,0 @@
-Generic single-database configuration.
\ No newline at end of file
diff --git a/alembic/env.py b/alembic/env.py
deleted file mode 100644
index 9ffb4036..00000000
--- a/alembic/env.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import os
-import sys
-from logging.config import fileConfig
-
-from sqlalchemy import engine_from_config, pool
-
-from alembic import context
-
-# Add the src directory to the Python path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src"))
-
-# Import the database models
-# Note: These imports must come after sys.path modification
-from talos.database.models import Base # noqa: E402
-from talos.database.session import get_database_url # noqa: E402
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This line sets up loggers basically.
-if config.config_file_name is not None:
- fileConfig(config.config_file_name)
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-target_metadata = Base.metadata
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline() -> None:
- """Run migrations in 'offline' mode.
-
- This configures the context with just a URL
- and not an Engine, though an Engine is acceptable
- here as well. By skipping the Engine creation
- we don't even need a DBAPI to be available.
-
- Calls to context.execute() here emit the given string to the
- script output.
-
- """
- # Use the same database URL logic as the main application
- url = get_database_url()
- context.configure(
- url=url,
- target_metadata=target_metadata,
- literal_binds=True,
- dialect_opts={"paramstyle": "named"},
- )
-
- with context.begin_transaction():
- context.run_migrations()
-
-
-def run_migrations_online() -> None:
- """Run migrations in 'online' mode.
-
- In this scenario we need to create an Engine
- and associate a connection with the context.
-
- """
- # Use the same database URL logic as the main application
- url = get_database_url()
-
- # Create engine with the properly formatted URL
- connectable = engine_from_config(
- {"sqlalchemy.url": url},
- prefix="sqlalchemy.",
- poolclass=pool.NullPool,
- )
-
- with connectable.connect() as connection:
- context.configure(connection=connection, target_metadata=target_metadata)
-
- with context.begin_transaction():
- context.run_migrations()
-
-
-if context.is_offline_mode():
- run_migrations_offline()
-else:
- run_migrations_online()
diff --git a/alembic/script.py.mako b/alembic/script.py.mako
deleted file mode 100644
index 480b130d..00000000
--- a/alembic/script.py.mako
+++ /dev/null
@@ -1,28 +0,0 @@
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision | comma,n}
-Create Date: ${create_date}
-
-"""
-from typing import Sequence, Union
-
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-# revision identifiers, used by Alembic.
-revision: str = ${repr(up_revision)}
-down_revision: Union[str, None] = ${repr(down_revision)}
-branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
-depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
-
-
-def upgrade() -> None:
- """Upgrade schema."""
- ${upgrades if upgrades else "pass"}
-
-
-def downgrade() -> None:
- """Downgrade schema."""
- ${downgrades if downgrades else "pass"}
diff --git a/alembic/versions/2d1832776538_add_chainlink_bridging.py b/alembic/versions/2d1832776538_add_chainlink_bridging.py
deleted file mode 100644
index 2c4c7cd3..00000000
--- a/alembic/versions/2d1832776538_add_chainlink_bridging.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""add chainlink bridging
-
-Revision ID: 2d1832776538
-Revises: 54a4e7fb3a17
-Create Date: 2025-09-12 17:08:02.426767
-
-"""
-from typing import Sequence, Union
-
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision: str = '2d1832776538'
-down_revision: Union[str, None] = '54a4e7fb3a17'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
-
-
-def upgrade() -> None:
- """Upgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('chainlink_bridges',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('source_chain_id', sa.Integer(), nullable=False),
- sa.Column('dest_chain_id', sa.Integer(), nullable=False),
- sa.Column('recipient_address', sa.String(length=42), nullable=False),
- sa.Column('token_address', sa.String(length=42), nullable=False),
- sa.Column('transaction_hash', sa.String(length=66), nullable=False),
- sa.Column('amount', sa.Numeric(precision=78), nullable=False),
- sa.Column('created_at', sa.DateTime(), nullable=False),
- sa.PrimaryKeyConstraint('id')
- )
- # ### end Alembic commands ###
-
-
-def downgrade() -> None:
- """Downgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('chainlink_bridges')
- # ### end Alembic commands ###
diff --git a/alembic/versions/48f62aff9aa9_add_swaps.py b/alembic/versions/48f62aff9aa9_add_swaps.py
deleted file mode 100644
index 3193edb8..00000000
--- a/alembic/versions/48f62aff9aa9_add_swaps.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""add swaps
-
-Revision ID: 48f62aff9aa9
-Revises: d7ca3e4695b8
-Create Date: 2025-09-10 00:07:44.965094
-
-"""
-from typing import Sequence, Union
-
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision: str = '48f62aff9aa9'
-down_revision: Union[str, None] = 'd7ca3e4695b8'
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
-
-
-def upgrade() -> None:
- """Upgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('swaps',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('strategy_id', sa.String(length=255), nullable=False),
- sa.Column('wallet_address', sa.String(length=42), nullable=False),
- sa.Column('amount_in', sa.Numeric(precision=78), nullable=False),
- sa.Column('token_in', sa.String(length=42), nullable=False),
- sa.Column('amount_out', sa.Numeric(precision=78), nullable=False),
- sa.Column('token_out', sa.String(length=42), nullable=False),
- sa.Column('created_at', sa.DateTime(), nullable=False),
- sa.PrimaryKeyConstraint('id')
- )
- # ### end Alembic commands ###
-
-
-def downgrade() -> None:
- """Downgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('swaps')
- # ### end Alembic commands ###
diff --git a/alembic/versions/54a4e7fb3a17_add_tx_hash_to_swaps.py b/alembic/versions/54a4e7fb3a17_add_tx_hash_to_swaps.py
deleted file mode 100644
index 9d34aa13..00000000
--- a/alembic/versions/54a4e7fb3a17_add_tx_hash_to_swaps.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""add tx_hash to swaps
-
-Revision ID: 54a4e7fb3a17
-Revises: 48f62aff9aa9
-Create Date: 2025-09-10 03:24:50.980377
-
-"""
-
-from typing import Sequence, Union
-
-import sqlalchemy as sa
-
-from alembic import op
-
-# revision identifiers, used by Alembic.
-revision: str = "54a4e7fb3a17"
-down_revision: Union[str, None] = "48f62aff9aa9"
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
-
-
-def upgrade() -> None:
- """Upgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.add_column("swaps", sa.Column("transaction_hash", sa.String(length=66), nullable=False))
- op.add_column("swaps", sa.Column("chain_id", sa.Integer(), nullable=False))
- # ### end Alembic commands ###
-
-
-def downgrade() -> None:
- """Downgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_column("swaps", "transaction_hash")
- op.drop_column("swaps", "chain_id")
- # ### end Alembic commands ###
diff --git a/alembic/versions/ba582872fd6c_initial_migration_with_all_models.py b/alembic/versions/ba582872fd6c_initial_migration_with_all_models.py
deleted file mode 100644
index cb82ef5b..00000000
--- a/alembic/versions/ba582872fd6c_initial_migration_with_all_models.py
+++ /dev/null
@@ -1,124 +0,0 @@
-"""Initial migration with all models
-
-Revision ID: ba582872fd6c
-Revises:
-Create Date: 2025-09-08 11:36:30.346494
-
-"""
-from typing import Sequence, Union
-
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision: str = 'ba582872fd6c'
-down_revision: Union[str, None] = None
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
-
-
-def upgrade() -> None:
- """Upgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table('users',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('user_id', sa.String(length=255), nullable=False),
- sa.Column('is_temporary', sa.Boolean(), nullable=False),
- sa.Column('created_at', sa.DateTime(), nullable=False),
- sa.Column('last_active', sa.DateTime(), nullable=False),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_index(op.f('ix_users_user_id'), 'users', ['user_id'], unique=True)
- op.create_table('contract_deployments',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('contract_signature', sa.String(length=66), nullable=False),
- sa.Column('contract_address', sa.String(length=42), nullable=False),
- sa.Column('chain_id', sa.Integer(), nullable=False),
- sa.Column('salt', sa.String(length=66), nullable=False),
- sa.Column('bytecode_hash', sa.String(length=66), nullable=False),
- sa.Column('deployment_metadata', sa.JSON(), nullable=True),
- sa.Column('transaction_hash', sa.String(length=66), nullable=False),
- sa.Column('deployed_at', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_index('idx_signature_chain', 'contract_deployments', ['contract_signature', 'chain_id'], unique=True)
- op.create_index(op.f('ix_contract_deployments_chain_id'), 'contract_deployments', ['chain_id'], unique=False)
- op.create_index(op.f('ix_contract_deployments_contract_address'), 'contract_deployments', ['contract_address'], unique=False)
- op.create_index(op.f('ix_contract_deployments_contract_signature'), 'contract_deployments', ['contract_signature'], unique=False)
- op.create_table('conversation_history',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('session_id', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime(), nullable=False),
- sa.Column('updated_at', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_index(op.f('ix_conversation_history_session_id'), 'conversation_history', ['session_id'], unique=False)
- op.create_table('datasets',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('dataset_metadata', sa.JSON(), nullable=True),
- sa.Column('created_at', sa.DateTime(), nullable=False),
- sa.Column('updated_at', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_index(op.f('ix_datasets_name'), 'datasets', ['name'], unique=False)
- op.create_table('memories',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('description', sa.Text(), nullable=False),
- sa.Column('memory_metadata', sa.JSON(), nullable=True),
- sa.Column('embedding', sa.JSON(), nullable=True),
- sa.Column('timestamp', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_table('dataset_chunks',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('dataset_id', sa.Integer(), nullable=False),
- sa.Column('content', sa.Text(), nullable=False),
- sa.Column('embedding', sa.JSON(), nullable=True),
- sa.Column('chunk_index', sa.Integer(), nullable=False),
- sa.Column('chunk_metadata', sa.JSON(), nullable=True),
- sa.ForeignKeyConstraint(['dataset_id'], ['datasets.id'], ),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_table('messages',
- sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
- sa.Column('user_id', sa.Integer(), nullable=False),
- sa.Column('conversation_id', sa.Integer(), nullable=False),
- sa.Column('role', sa.String(length=50), nullable=False),
- sa.Column('content', sa.Text(), nullable=False),
- sa.Column('message_metadata', sa.JSON(), nullable=True),
- sa.Column('timestamp', sa.DateTime(), nullable=False),
- sa.ForeignKeyConstraint(['conversation_id'], ['conversation_history.id'], ),
- sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
- sa.PrimaryKeyConstraint('id')
- )
- # ### end Alembic commands ###
-
-
-def downgrade() -> None:
- """Downgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_table('messages')
- op.drop_table('dataset_chunks')
- op.drop_table('memories')
- op.drop_index(op.f('ix_datasets_name'), table_name='datasets')
- op.drop_table('datasets')
- op.drop_index(op.f('ix_conversation_history_session_id'), table_name='conversation_history')
- op.drop_table('conversation_history')
- op.drop_index(op.f('ix_contract_deployments_contract_signature'), table_name='contract_deployments')
- op.drop_index(op.f('ix_contract_deployments_contract_address'), table_name='contract_deployments')
- op.drop_index(op.f('ix_contract_deployments_chain_id'), table_name='contract_deployments')
- op.drop_index('idx_signature_chain', table_name='contract_deployments')
- op.drop_table('contract_deployments')
- op.drop_index(op.f('ix_users_user_id'), table_name='users')
- op.drop_table('users')
- # ### end Alembic commands ###
diff --git a/alembic/versions/d7ca3e4695b8_add_counter.py b/alembic/versions/d7ca3e4695b8_add_counter.py
deleted file mode 100644
index 856c27c3..00000000
--- a/alembic/versions/d7ca3e4695b8_add_counter.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""add counter
-
-Revision ID: d7ca3e4695b8
-Revises: ba582872fd6c
-Create Date: 2025-09-09 22:03:05.737309
-
-"""
-
-from typing import Sequence, Union
-
-import sqlalchemy as sa
-
-from alembic import op
-
-# revision identifiers, used by Alembic.
-revision: str = "d7ca3e4695b8"
-down_revision: Union[str, None] = "ba582872fd6c"
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
-
-
-def upgrade() -> None:
- """Upgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.create_table(
- "counters",
- sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
- sa.Column("name", sa.String(length=255), nullable=False),
- sa.Column("value", sa.Integer(), nullable=False),
- sa.Column("created_at", sa.DateTime(), nullable=False),
- sa.Column("updated_at", sa.DateTime(), nullable=False),
- sa.PrimaryKeyConstraint("id"),
- )
- op.create_index(op.f("ix_counters_name"), "counters", ["name"], unique=True)
- # ### end Alembic commands ###
-
-
-def downgrade() -> None:
- """Downgrade schema."""
- # ### commands auto generated by Alembic - please adjust! ###
- op.drop_index(op.f("ix_counters_name"), table_name="counters")
- op.drop_table("counters")
- # ### end Alembic commands ###
diff --git a/assets/talos-header.jpeg b/assets/talos-header.jpeg
deleted file mode 100644
index 219c1e68..00000000
Binary files a/assets/talos-header.jpeg and /dev/null differ
diff --git a/dao_communication_playbook/README.md b/dao_communication_playbook/README.md
new file mode 100644
index 00000000..397d2776
--- /dev/null
+++ b/dao_communication_playbook/README.md
@@ -0,0 +1,29 @@
+# Playbook de Comunicação da DAO e Verificação de Artefatos
+
+
+
+Este repositório contém o playbook de comunicação para as propostas de governança da DAO, bem como os scripts para verificar a integridade dos artefatos.
+
+## Playbook de Comunicação (`communication_playbook.json`)
+
+O arquivo `communication_playbook.json` é um recurso centralizado que contém todos os templates de copy para as campanhas de comunicação da DAO. Ele está estruturado por plataforma de mídia social e por estágio da campanha (anúncio, lembrete, última chamada, resultados).
+
+Este arquivo foi projetado para ser "pronto para a máquina", permitindo que scripts de automação usem esses templates para gerar e postar conteúdo dinamicamente.
+
+## Verificação de Integridade
+
+Para garantir que os artefatos de comunicação não foram adulterados, você pode usar os scripts de verificação fornecidos.
+
+### Para Linux/macOS:
+
+```bash
+./verify.sh
+```
+
+### Para Windows (PowerShell):
+
+```powershell
+powershell -ExecutionPolicy Bypass -File verify.ps1
+```
+
+Esses scripts verificam o checksum SHA-256 do arquivo `communication_playbook.json` contra o valor armazenado em `SHA256SUMS`. No futuro, eles também incluirão a verificação de assinatura GPG para uma camada adicional de segurança.
\ No newline at end of file
diff --git a/dao_communication_playbook/SHA256SUMS b/dao_communication_playbook/SHA256SUMS
new file mode 100644
index 00000000..7d7a3b3c
--- /dev/null
+++ b/dao_communication_playbook/SHA256SUMS
@@ -0,0 +1 @@
+17731229d5adb5a1b944fc7c827098fbc0d6f0e73270e1294ce07e77fcafd6c9 communication_playbook.json
diff --git a/dao_communication_playbook/badge-verified-256-gpg.svg b/dao_communication_playbook/badge-verified-256-gpg.svg
new file mode 100644
index 00000000..534c8564
--- /dev/null
+++ b/dao_communication_playbook/badge-verified-256-gpg.svg
@@ -0,0 +1,4 @@
+
+
+ ✔ Verified SHA-256 + GPG
+
\ No newline at end of file
diff --git a/dao_communication_playbook/communication_playbook.json b/dao_communication_playbook/communication_playbook.json
new file mode 100644
index 00000000..33de4884
--- /dev/null
+++ b/dao_communication_playbook/communication_playbook.json
@@ -0,0 +1,84 @@
+{
+ "playbook_version": "1.0",
+ "campaign_stages": {
+ "announcement": {
+ "name": "Anúncio da Votação (Início)",
+ "email": {
+ "subject": "🚀 Vote agora – sua decisão vai moldar o futuro da nossa DAO!",
+ "body": "Olá, {{member_name}}!\n\nA nova proposta **{{proposal_title}}** já está no ar e precisamos do seu voto.\n\n🗳️ **Como votar?** Acesse {{vote_link}} → Clique em “Vote”.\n⏰ **Prazo:** {{deadline}} (faltam {{time_remaining}}).\n\n⚙️ **Dúvidas?** Pergunte ao bot Z.ai no Discord/Telegram: `@Z.ai como voto?`\n\nContamos com sua participação para garantir decisões transparentes e alinhadas à comunidade.\n\nAbraço,\nEquipe de Governança DAO"
+ },
+ "telegram_discord": {
+ "message": "📢 **VOTAÇÃO EM ABERTO**\nProposta: *{{proposal_title}}*\n🔗 Vote aqui: {{vote_link}}\n⏳ Encerramento: {{deadline}} ({{time_remaining}})\n\n❓ Dúvidas? Pergunte ao bot **@Z.ai** (`como voto?`).\n\n#Vote #DAOGovernance"
+ },
+ "twitter_x": {
+ "message": "🚀 A proposta **{{proposal_title}}** está viva! Seu voto define o próximo passo da DAO.\n🗳️ Vote agora → {{vote_link}}\n⏰ Faltam {{time_remaining}}.\n\n#DAO #Governança #VoteAgora"
+ },
+ "linkedin_carousel": [
+ { "slide": 1, "text": "Problema: Precisamos de X" },
+ { "slide": 2, "text": "Proposta: A solução Y" },
+ { "slide": 3, "text": "Benefícios: +30% de eficiência" },
+ { "slide": 4, "text": "Como votar: passo-a-passo" },
+ { "slide": 5, "text": "CTA: Clique e vote" }
+ ],
+ "instagram_post": {
+ "caption": "🗳️ **VOTAÇÃO ABERTA!** A proposta '{{proposal_title}}' está no ar e sua participação é fundamental. Vote através do link na nossa bio!\n\nEncerramento: {{deadline}}.\n\n#DAO #Governança #Web3 #Vote"
+ },
+ "instagram_story": {
+ "text_overlay": "VOTAÇÃO ABERTA\nProposta: {{proposal_title}}\nArraste para cima para votar!",
+ "sticker": "poll"
+ },
+ "reddit": {
+ "subreddit": "r/{{dao_subreddit}}",
+ "title": "[VOTAÇÃO OFICIAL] Proposta: {{proposal_title}}",
+ "body": "Olá comunidade,\n\nA votação para a proposta **{{proposal_title}}** está oficialmente aberta.\n\n**Link para Votar:** {{vote_link}}\n**Link para Discussão no Fórum:** {{discussion_link}}\n**Prazo Final:** {{deadline}}\n\nPor favor, leiam a proposta completa e participem da discussão antes de votar. Todo feedback é bem-vindo.\n\nContamos com vocês!"
+ },
+ "medium_blog": {
+ "title": "Análise da Proposta: {{proposal_title}} – Por que seu voto é crucial",
+      "body_template": "## Análise da Proposta: {{proposal_title}}\n\nA comunidade da DAO está diante de uma decisão importante... [elaborar sobre o problema e a solução proposta]...\n\n## Como Votar\n\nA votação está aberta até {{deadline}}. Acesse o link a seguir para registrar seu voto: [Página de Votação]({{vote_link}})."
+ }
+ },
+ "reminder": {
+ "name": "Lembrete (Meio do Período)",
+ "telegram_discord": {
+ "message": "⏰ **LEMBRETE DE VOTAÇÃO**\nAinda dá tempo de votar na proposta *{{proposal_title}}*.\n🔗 Vote aqui: {{vote_link}}\n⏳ Encerramento: {{deadline}}\n\nSua voz é importante!"
+ },
+ "twitter_x": {
+ "message": "⏰ Ainda não votou? A proposta **{{proposal_title}}** precisa da sua opinião. Não deixe para a última hora!\n🗳️ Vote aqui → {{vote_link}}\n\n#DAO #Governança"
+ }
+ },
+ "last_call": {
+ "name": "Última Chamada (Fim do Período)",
+ "email": {
+ "subject": "⚡ Última chamada – a votação para {{proposal_title}} termina em breve!",
+ "body": "Olá, {{member_name}}!\n\nEsta é a última chamada para votar na proposta **{{proposal_title}}**. A votação encerra em {{time_remaining}}.\n\nNão perca a chance de influenciar esta decisão.\n\n🗳️ **Vote agora:** {{vote_link}}\n\nAbraço,\nEquipe de Governança DAO"
+ },
+ "telegram_discord": {
+ "message": "🚨 **ÚLTIMA CHAMADA - ENCERRAMENTO EM {{time_remaining}}!**\nNão perca a chance de votar na proposta *{{proposal_title}}*.\n🔗 Vote agora: {{vote_link}}"
+ },
+ "twitter_x": {
+ "message": "🚨 **Última chamada!** A votação para **{{proposal_title}}** encerra em {{time_remaining}}. Este é o seu último momento para votar!\n🗳️ Vote JÁ → {{vote_link}}\n\n#DAO #LastCall"
+ },
+ "instagram_story": {
+ "text_overlay": "ÚLTIMA CHAMADA!\nVOTAÇÃO ENCERRA EM {{time_remaining}}",
+ "sticker": "countdown"
+ }
+ },
+ "results": {
+ "name": "Resultados da Votação",
+ "email": {
+ "subject": "📊 Resultados da votação: {{proposal_title}}",
+ "body": "Olá, {{member_name}}!\n\nA votação para a proposta **{{proposal_title}}** foi concluída.\n\n**Resultado:** {{result_summary}}\n\n**Detalhes:**\n- Votos 'Sim': {{yes_votes}} ({{yes_percentage}}%)\n- Votos 'Não': {{no_votes}} ({{no_percentage}}%)\n- Participação total: {{total_votes}}\n\nObrigado a todos que participaram!\n\nAbraço,\nEquipe de Governança DAO"
+ },
+ "telegram_discord": {
+ "message": "📊 **RESULTADO DA VOTAÇÃO**\nProposta: *{{proposal_title}}*\n\n**Aprovada:** {{result_status}}\n- ✅ Sim: {{yes_percentage}}%\n- ❌ Não: {{no_percentage}}%\n\nObrigado a todos pela participação!"
+ },
+ "twitter_x": {
+ "message": "📊 Resultados da votação para **{{proposal_title}}**:\n\n✅ Aprovada: {{result_status}}\n\nObrigado à comunidade pela participação ativa na governança. Vamos construir juntos!\n\n#DAO #Governança #Resultados"
+ },
+ "medium_blog": {
+ "title": "Resultado e Próximos Passos: Proposta {{proposal_title}}",
+      "body_template": "## Resultados da Votação\n\nA proposta {{proposal_title}} foi **{{result_status}}** com {{yes_percentage}}% dos votos a favor.\n\n## O que isso significa para a DAO?\n\n[Analisar o impacto da decisão e os próximos passos]...\n\n## Transparência\n\nO registro completo da votação pode ser auditado aqui: {{vote_audit_link}}."
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/dao_communication_playbook/verify.ps1 b/dao_communication_playbook/verify.ps1
new file mode 100755
index 00000000..d553218b
--- /dev/null
+++ b/dao_communication_playbook/verify.ps1
@@ -0,0 +1,21 @@
+Set-StrictMode -Version Latest
+Write-Host "🔐 Aurum Grid – Verificação de Integridade dos Artefatos de Comunicação" -ForegroundColor Cyan
+
+$hash = (Get-FileHash -Algorithm SHA256 communication_playbook.json).Hash.ToLower()
+$expected = (Get-Content SHA256SUMS).Split(' ')[0]
+
+if ($hash -ne $expected) {
+ Write-Host "❌ Hash inválido para communication_playbook.json – abortando" -ForegroundColor Red; exit 1
+}
+Write-Host "✅ Hash SHA-256 OK" -ForegroundColor Green
+
+# Placeholder for GPG verification
+Write-Host "⚠️ Assinatura GPG não implementada neste script. Verificação de hash concluída." -ForegroundColor Yellow
+
+# gpg --verify SHA256SUMS.asc SHA256SUMS --quiet
+# if ($LASTEXITCODE -ne 0) {
+# Write-Host "❌ Assinatura GPG inválida – abortando" -ForegroundColor Red; exit 1
+# }
+# Write-Host "✅ Assinatura GPG OK" -ForegroundColor Green
+
+Write-Host "🎉 Pacote verificado com sucesso (apenas hash)" -ForegroundColor White
\ No newline at end of file
diff --git a/dao_communication_playbook/verify.sh b/dao_communication_playbook/verify.sh
new file mode 100755
index 00000000..4b57cefd
--- /dev/null
+++ b/dao_communication_playbook/verify.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -e
+echo "🔐 Aurum Grid – Verificação de Integridade dos Artefatos de Comunicação"
+if ! sha256sum -c SHA256SUMS --quiet; then
+ echo "❌ Hash inválido para communication_playbook.json – abortando"; exit 1
+fi
+echo "✅ Hash SHA-256 OK"
+
+# Placeholder for GPG verification
+echo "⚠️ Assinatura GPG não implementada neste script. Verificação de hash concluída."
+
+# if ! gpg --verify SHA256SUMS.asc SHA256SUMS --quiet; then
+# echo "❌ Assinatura GPG inválida – abortando"; exit 1
+# fi
+# echo "✅ Assinatura GPG OK"
+
+echo "🎉 Pacote verificado com sucesso (apenas hash)"
\ No newline at end of file
diff --git a/disabled_test_interactive.py b/disabled_test_interactive.py
deleted file mode 100644
index fa366e6d..00000000
--- a/disabled_test_interactive.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import os
-import subprocess
-
-# Set dummy environment variables
-env = os.environ.copy()
-env["OPENAI_API_KEY"] = "dummy"
-env["GITHUB_API_TOKEN"] = "dummy"
-
-# Start the talos process
-process = subprocess.Popen(
- ["talos"],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- text=True,
- env=env,
-)
-
-# Send input to the process
-process.stdin.write("hello\n")
-process.stdin.flush()
-process.stdin.write("exit\n")
-process.stdin.flush()
-
-# Read the output
-stdout, stderr = process.communicate()
-
-# Check the output
-print("stdout:")
-print(stdout)
-print("stderr:")
-print(stderr)
-assert "Entering interactive mode." in stdout
-assert ">>" in stdout
-assert "hello" in stdout
-
-print("Test passed!")
diff --git a/docker-compose.rofl.yml b/docker-compose.rofl.yml
deleted file mode 100644
index 45551f13..00000000
--- a/docker-compose.rofl.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-services:
- talos-agent:
- build: .
- # NOTE: Run ./scripts/build_and_push_container_image.sh to retrieve the digest.
- image: ghcr.io/talos-agent/talos:latest-agent@sha256:ed66b3a4e2e71eb9c97e2fe0c14b0d2aa8c778de09c67a5642a500a138ed6871
- container_name: talos-agent
- restart: unless-stopped
- environment:
- - OPENAI_API_KEY=${OPENAI_API_KEY}
- - GITHUB_API_TOKEN=${GITHUB_API_TOKEN}
- - TWITTER_BEARER_TOKEN=${TWITTER_BEARER_TOKEN}
- - PINATA_API_KEY=${PINATA_API_KEY}
- - PINATA_SECRET_API_KEY=${PINATA_SECRET_API_KEY}
- volumes:
- - talos_data:/app/data
- - /run/rofl-appd.sock:/run/rofl-appd.sock
- ports:
- - "8080:8080"
-
-volumes:
- talos_data:
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index 86881b65..00000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-services:
- talos-agent:
- build: .
- container_name: talos-agent
- restart: unless-stopped
- ports:
- - "8080:8080"
- environment:
- - OPENAI_API_KEY=${OPENAI_API_KEY}
- - GITHUB_API_TOKEN=${GITHUB_API_TOKEN}
- - TWITTER_BEARER_TOKEN=${TWITTER_BEARER_TOKEN}
- - PINATA_API_KEY=${PINATA_API_KEY}
- - PINATA_SECRET_API_KEY=${PINATA_SECRET_API_KEY}
- volumes:
- - talos_data:/app/data
-
-volumes:
- talos_data:
diff --git a/docs/STRUCTURED_DAG_TUTORIAL.md b/docs/STRUCTURED_DAG_TUTORIAL.md
deleted file mode 100644
index 6125e935..00000000
--- a/docs/STRUCTURED_DAG_TUTORIAL.md
+++ /dev/null
@@ -1,483 +0,0 @@
-# Structured DAG Framework Tutorial
-
-## Overview
-
-The Structured DAG Framework is a blockchain-native AI system that enables individual component upgrades while maintaining deterministic behavior and system integrity. It orchestrates a network of specialized support agents through a structured Directed Acyclic Graph (DAG) architecture.
-
-## Table of Contents
-
-1. [Architecture Overview](#architecture-overview)
-2. [Key Concepts](#key-concepts)
-3. [Getting Started](#getting-started)
-4. [Node Versioning](#node-versioning)
-5. [Upgrade Workflows](#upgrade-workflows)
-6. [Blockchain Integration](#blockchain-integration)
-7. [Best Practices](#best-practices)
-8. [Troubleshooting](#troubleshooting)
-
-## Architecture Overview
-
-### Blockchain-Native Design
-
-The framework is designed from the ground up for blockchain compatibility:
-
-- **Deterministic Execution**: All operations produce reproducible results
-- **Individual Node Upgrades**: Enable granular system evolution
-- **Hash-based Verification**: Prevents tampering and ensures integrity
-- **Serializable State**: Enables on-chain storage and verification
-
-### Core Components
-
-```
-┌─────────────────────────────────────────────────────────────┐
-│ StructuredMainAgent │
-├─────────────────────────────────────────────────────────────┤
-│ ┌─────────────────┐ ┌─────────────────────────────────┐ │
-│ │ StructuredDAG │ │ SupportAgent Registry │ │
-│ │ Manager │ │ │ │
-│ │ │ │ ┌─────────────┐ ┌─────────────┐│ │
-│ │ • Node Upgrades │ │ │ Governance │ │ Analytics ││ │
-│ │ • Version Mgmt │ │ │ Agent │ │ Agent ││ │
-│ │ • Serialization │ │ │ │ │ ││ │
-│ └─────────────────┘ │ └─────────────┘ └─────────────┘│ │
-└─────────────────────────────────────────────────────────────┘
- │
- ▼
-┌─────────────────────────────────────────────────────────────┐
-│ Structured DAG │
-├─────────────────────────────────────────────────────────────┤
-│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────┐ │
-│ │ Router │ │ Support │ │ Prompt/Data │ │
-│ │ Node │ │ Agent │ │ Nodes │ │
-│ │ │ │ Nodes │ │ │ │
-│ │ • Keywords │ │ │ │ • Shared Resources │ │
-│ │ • Hash-based│ │ • Versioned │ │ • Deterministic │ │
-│ │ • Routing │ │ • Upgradeable│ │ • Configurable │ │
-│ └─────────────┘ └─────────────┘ └─────────────────────┘ │
-└─────────────────────────────────────────────────────────────┘
-```
-
-## Key Concepts
-
-### Support Agents
-
-Support agents are specialized AI components with specific domain expertise:
-
-- **Domain-Specific**: Each agent handles a particular area (governance, analytics, etc.)
-- **Versioned**: Individual semantic versioning for controlled upgrades
-- **Configurable**: Custom architectures and delegation patterns
-- **Isolated**: Independent upgrade paths without affecting other agents
-
-### Node Versioning
-
-The framework uses semantic versioning (major.minor.patch) with three upgrade policies:
-
-- **Compatible**: Only allows upgrades within the same major version
-- **Exact**: Requires exact version matches (no upgrades allowed)
-- **Any**: Allows any newer version upgrade (use with caution)
-
-### Deterministic Delegation
-
-Task routing uses deterministic patterns:
-
-- **Keyword Matching**: Sorted rule evaluation for reproducible results
-- **Hash Verification**: Ensures delegation rules haven't been tampered with
-- **Fallback Mechanisms**: Default routing for unmatched queries
-
-## Getting Started
-
-### Basic Setup
-
-```python
-from langchain_openai import ChatOpenAI
-from talos.core.extensible_agent import StructuredMainAgent
-from talos.dag.structured_nodes import NodeVersion
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-
-# Initialize the structured agent
-model = ChatOpenAI(model="gpt-4")
-prompt_manager = FilePromptManager("/path/to/prompts")
-
-agent = StructuredMainAgent(
- model=model,
- prompts_dir="/path/to/prompts",
- prompt_manager=prompt_manager,
- verbose=True,
- use_database_memory=False
-)
-```
-
-### Creating Custom Support Agents
-
-```python
-from talos.core.extensible_agent import SupportAgent
-
-# Define a custom support agent
-custom_agent = SupportAgent(
- name="security",
- domain="security",
- description="Security analysis and validation agent",
- architecture={
- "task_flow": ["scan", "analyze", "validate", "report"],
- "decision_points": ["threat_level", "validation_method", "response_action"],
- "capabilities": ["vulnerability_scanning", "threat_analysis", "compliance_check"]
- },
- delegation_keywords=["security", "vulnerability", "threat", "compliance"],
- task_patterns=["scan for vulnerabilities", "analyze threats", "validate security"]
-)
-```
-
-## Node Versioning
-
-### Version Compatibility
-
-```python
-from talos.dag.structured_nodes import NodeVersion
-
-# Create versions
-v1_0_0 = NodeVersion(major=1, minor=0, patch=0)
-v1_1_0 = NodeVersion(major=1, minor=1, patch=0)
-v2_0_0 = NodeVersion(major=2, minor=0, patch=0)
-
-# Check compatibility
-print(v1_0_0.is_compatible_with(v1_1_0)) # True - same major version
-print(v1_0_0.is_compatible_with(v2_0_0)) # False - different major version
-print(v1_1_0.is_newer_than(v1_0_0)) # True - higher minor version
-```
-
-### Upgrade Policies
-
-```python
-# Compatible policy - allows upgrades within same major version
-node.upgrade_policy = "compatible"
-node.can_upgrade_to(NodeVersion(1, 1, 0)) # True if current is 1.0.0
-node.can_upgrade_to(NodeVersion(2, 0, 0)) # False - major version change
-
-# Exact policy - no upgrades allowed
-node.upgrade_policy = "exact"
-node.can_upgrade_to(NodeVersion(1, 0, 1)) # False - any change blocked
-
-# Any policy - allows any newer version
-node.upgrade_policy = "any"
-node.can_upgrade_to(NodeVersion(2, 0, 0)) # True - any upgrade allowed
-```
-
-## Upgrade Workflows
-
-### Individual Node Upgrades
-
-```python
-# 1. Validate upgrade compatibility
-validation = agent.validate_upgrade("governance", NodeVersion(1, 1, 0))
-if not validation["valid"]:
- print(f"Upgrade blocked: {validation['reason']}")
- exit()
-
-# 2. Create enhanced agent
-enhanced_agent = SupportAgent(
- name="governance_v2",
- domain="governance",
- description="Enhanced governance with improved consensus",
- architecture={
- "task_flow": ["validate", "analyze", "simulate", "execute", "confirm"],
- "decision_points": ["proposal_validity", "consensus_mechanism", "execution_safety", "rollback_plan"],
- "capabilities": ["proposal_validation", "consensus_coordination", "safe_execution", "simulation"]
- },
- delegation_keywords=["governance", "proposal", "vote", "consensus", "dao"],
- task_patterns=["validate proposal", "coordinate consensus", "execute governance", "simulate outcome"]
-)
-
-# 3. Perform upgrade
-success = agent.upgrade_support_agent(
- "governance",
- enhanced_agent,
- NodeVersion(1, 1, 0)
-)
-
-if success:
- print("Upgrade completed successfully")
-else:
- print("Upgrade failed")
-```
-
-### Rollback Operations
-
-```python
-# Rollback to previous version
-success = agent.rollback_node("governance", NodeVersion(1, 0, 0))
-if success:
- print("Rollback completed successfully")
-else:
- print("Rollback failed - version may be newer than current")
-```
-
-### System Status Monitoring
-
-```python
-# Get comprehensive system status
-status = agent.get_structured_status()
-print(f"DAG has {status['total_nodes']} nodes")
-print(f"Blockchain ready: {status['blockchain_ready']}")
-
-for node_id, info in status['structured_nodes'].items():
- print(f"{node_id}: v{info['version']} (policy: {info['upgrade_policy']})")
-
-# Get individual node status
-node_status = agent.get_node_status("governance")
-print(f"Governance agent v{node_status['version']}")
-print(f"Node hash: {node_status['node_hash']}")
-print(f"Keywords: {node_status['delegation_keywords']}")
-```
-
-## Blockchain Integration
-
-### Deterministic Serialization
-
-The framework ensures all operations produce deterministic, reproducible results:
-
-```python
-# Export DAG for blockchain storage
-blockchain_data = agent.export_for_blockchain()
-print(f"DAG version: {blockchain_data.get('dag_version')}")
-print(f"Checksum: {blockchain_data.get('checksum')}")
-print(f"Nodes: {len(blockchain_data.get('nodes', {}))}")
-```
-
-### Hash-based Verification
-
-All components include deterministic hashes for integrity verification:
-
-- **Node Hashes**: Calculated from sorted node properties
-- **Delegation Hashes**: Verify routing rule integrity
-- **DAG Checksums**: Overall system integrity verification
-
-### On-chain Storage Format
-
-The export format is optimized for blockchain storage:
-
-```json
-{
- "dag_version": "1.0.0",
- "checksum": "a1b2c3d4e5f6...",
- "nodes": {
- "governance_agent": {
- "version": "1.1.0",
- "hash": "f6e5d4c3b2a1...",
- "config": {...}
- }
- },
- "edges": [...],
- "delegation_rules": {...}
-}
-```
-
-## Best Practices
-
-### Version Management
-
-1. **Use Semantic Versioning**: Follow semver principles for clear upgrade paths
-2. **Test Upgrades**: Validate compatibility before production upgrades
-3. **Document Changes**: Maintain clear upgrade documentation
-4. **Gradual Rollouts**: Test upgrades in staging environments first
-
-### Security Considerations
-
-1. **Hash Verification**: Always verify node and delegation hashes
-2. **Upgrade Validation**: Use appropriate upgrade policies for your use case
-3. **Rollback Planning**: Maintain rollback capabilities for critical systems
-4. **Access Control**: Implement proper permissions for upgrade operations
-
-### Performance Optimization
-
-1. **Selective Upgrades**: Only upgrade nodes that need changes
-2. **Batch Operations**: Group related upgrades when possible
-3. **Hash Caching**: Leverage deterministic hashes for verification
-4. **Minimal Rebuilds**: Only rebuild affected DAG components
-
-### Monitoring and Observability
-
-1. **Status Monitoring**: Regularly check system status
-2. **Version Tracking**: Monitor node versions and upgrade history
-3. **Hash Verification**: Validate integrity through hash checking
-4. **Performance Metrics**: Track upgrade and execution performance
-
-## Troubleshooting
-
-### Common Issues
-
-#### Upgrade Validation Failures
-
-**Problem**: Node upgrade fails with "Incompatible version" error
-
-**Solution**:
-```python
-# Check current upgrade policy
-status = agent.get_node_status("governance")
-print(f"Current policy: {status['upgrade_policy']}")
-
-# Validate upgrade before attempting
-validation = agent.validate_upgrade("governance", NodeVersion(2, 0, 0))
-if not validation["valid"]:
- print(f"Reason: {validation['reason']}")
-
-# Consider using force upgrade if necessary (use with caution)
-success = agent.upgrade_support_agent(
- "governance",
- new_agent,
- NodeVersion(2, 0, 0),
- force=True
-)
-```
-
-#### DAG Construction Failures
-
-**Problem**: Structured DAG fails to build
-
-**Solution**:
-```python
-# Check for missing dependencies
-try:
- agent._build_structured_dag()
-except Exception as e:
- print(f"DAG build error: {e}")
- # Check prompt manager and model configuration
-```
-
-#### Hash Mismatches
-
-**Problem**: Node hashes don't match expected values
-
-**Solution**:
-```python
-# Recalculate node hash
-node = agent.structured_dag_manager.node_registry["governance"]
-expected_hash = node._calculate_node_hash()
-print(f"Expected hash: {expected_hash}")
-print(f"Current hash: {node.node_hash}")
-```
-
-### Debugging Tips
-
-1. **Enable Verbose Mode**: Set `verbose=True` when creating agents
-2. **Check Status Regularly**: Use `get_structured_status()` for system overview
-3. **Validate Before Upgrading**: Always run `validate_upgrade()` first
-4. **Monitor CI/CD**: Ensure all checks pass before deployment
-
-## Advanced Usage
-
-### Custom Upgrade Policies
-
-```python
-# Create node with custom upgrade policy
-custom_node = StructuredSupportAgentNode(
- node_id="custom_agent",
- name="Custom Agent",
- support_agent=custom_agent,
- node_version=NodeVersion(1, 0, 0),
- upgrade_policy="exact" # No upgrades allowed
-)
-```
-
-### Blockchain Integration
-
-```python
-# Export for blockchain storage
-blockchain_data = agent.export_for_blockchain()
-
-# Store on blockchain (pseudo-code)
-blockchain_client.store_dag_config(
- config=blockchain_data,
- checksum=blockchain_data["checksum"]
-)
-
-# Verify integrity
-stored_config = blockchain_client.retrieve_dag_config()
-assert stored_config["checksum"] == blockchain_data["checksum"]
-```
-
-### Multi-Environment Deployment
-
-```python
-# Development environment
-dev_agent = StructuredMainAgent(
- model=ChatOpenAI(model="gpt-3.5-turbo"),
- prompts_dir="/dev/prompts",
- verbose=True
-)
-
-# Production environment
-prod_agent = StructuredMainAgent(
- model=ChatOpenAI(model="gpt-4"),
- prompts_dir="/prod/prompts",
- verbose=False
-)
-
-# Ensure consistent configuration
-dev_status = dev_agent.get_structured_status()
-prod_status = prod_agent.get_structured_status()
-assert dev_status["delegation_hash"] == prod_status["delegation_hash"]
-```
-
-## API Reference
-
-### StructuredMainAgent
-
-The main entry point for the structured DAG framework.
-
-#### Methods
-
-- `upgrade_support_agent(domain, new_agent, new_version, force=False)`: Upgrade individual node
-- `validate_upgrade(domain, new_version)`: Validate upgrade compatibility
-- `rollback_node(domain, target_version)`: Rollback to previous version
-- `get_node_status(domain)`: Get detailed node information
-- `get_structured_status()`: Get comprehensive system status
-- `export_for_blockchain()`: Export for blockchain storage
-- `delegate_task(query, context=None)`: Execute task through DAG
-
-### NodeVersion
-
-Semantic versioning for DAG nodes.
-
-#### Methods
-
-- `is_compatible_with(other)`: Check version compatibility
-- `is_newer_than(other)`: Compare version precedence
-- `__str__()`: String representation (major.minor.patch)
-
-### StructuredSupportAgentNode
-
-Individual DAG node with versioning capabilities.
-
-#### Methods
-
-- `can_upgrade_to(new_version)`: Check upgrade eligibility
-- `execute(state)`: Process graph state
-- `get_node_config()`: Get serializable configuration
-- `_calculate_node_hash()`: Generate deterministic hash
-
-### StructuredDAGManager
-
-Manager for DAG operations and upgrades.
-
-#### Methods
-
-- `create_structured_dag(...)`: Build deterministic DAG
-- `upgrade_node(domain, new_agent, new_version, force=False)`: Perform node upgrade
-- `validate_upgrade(domain, new_version)`: Validate upgrade request
-- `rollback_node(domain, target_version)`: Rollback node version
-- `get_structured_dag_status()`: Get comprehensive DAG status
-- `export_for_blockchain()`: Export DAG configuration
-
-## Conclusion
-
-The Structured DAG Framework provides a robust foundation for blockchain-native AI systems with individual component upgrades. By following the patterns and best practices outlined in this tutorial, you can build scalable, maintainable AI systems that evolve safely over time.
-
-Key takeaways:
-
-1. **Deterministic Design**: All operations produce reproducible results
-2. **Individual Upgrades**: Components can be upgraded independently
-3. **Version Safety**: Semantic versioning prevents breaking changes
-4. **Blockchain Ready**: Built for on-chain deployment and verification
-5. **Comprehensive Monitoring**: Full visibility into system state and health
-
-For additional support and examples, refer to the demo script and test cases in the repository.
diff --git a/docs/api/core.md b/docs/api/core.md
deleted file mode 100644
index d51266d3..00000000
--- a/docs/api/core.md
+++ /dev/null
@@ -1,468 +0,0 @@
-# Core API Reference
-
-This document provides detailed API reference for the core components of the Talos system.
-
-## Agent Classes
-
-### Agent
-
-The base agent class that provides core functionality for all AI agents in the system.
-
-```python
-class Agent:
- def __init__(
- self,
- name: str,
- model: str = "gpt-5",
- memory: Optional[Memory] = None
- ):
- """Initialize an agent with specified configuration.
-
- Args:
- name: Unique identifier for the agent
- model: LLM model to use (default: "gpt-5")
- memory: Optional memory instance for conversation history
- """
-```
-
-#### Methods
-
-##### `process_query(query: str) -> QueryResponse`
-
-Process a user query and return a structured response.
-
-**Parameters:**
-- `query` (str): The user's query or request
-
-**Returns:**
-- `QueryResponse`: Structured response containing answers and metadata
-
-**Raises:**
-- `ValidationError`: If query is empty or invalid
-- `APIError`: If LLM service is unavailable
-
-**Example:**
-```python
-agent = Agent(name="my_agent")
-response = agent.process_query("What is the current market sentiment?")
-print(response.answers[0])
-```
-
-##### `add_memory(description: str, metadata: Optional[dict] = None) -> None`
-
-Add a memory to the agent's persistent memory system.
-
-**Parameters:**
-- `description` (str): Description of the memory to store
-- `metadata` (Optional[dict]): Additional metadata for the memory
-
-**Example:**
-```python
-agent.add_memory(
- "User prefers conservative investment strategies",
- {"category": "preference", "importance": "high"}
-)
-```
-
-##### `search_memory(query: str, limit: int = 10) -> List[Memory]`
-
-Search the agent's memory for relevant information.
-
-**Parameters:**
-- `query` (str): Search query
-- `limit` (int): Maximum number of results to return
-
-**Returns:**
-- `List[Memory]`: List of relevant memories
-
-### MainAgent
-
-The primary agent that orchestrates all system components and handles user interactions.
-
-```python
-class MainAgent(Agent):
- def __init__(self):
- """Initialize the main agent with all system components."""
-```
-
-#### Methods
-
-##### `run(query: str, history: Optional[List[Message]] = None) -> AIMessage`
-
-Execute a query through the complete system pipeline.
-
-**Parameters:**
-- `query` (str): User query to process
-- `history` (Optional[List[Message]]): Conversation history
-
-**Returns:**
-- `AIMessage`: AI response message
-
-**Example:**
-```python
-main_agent = MainAgent()
-response = main_agent.run("Analyze the latest governance proposal")
-print(response.content)
-```
-
-## Memory System
-
-### Memory
-
-Persistent memory system with semantic search capabilities.
-
-```python
-class Memory:
- def __init__(
- self,
- agent_name: str,
- batch_size: int = 10,
- auto_save: bool = True
- ):
- """Initialize memory system.
-
- Args:
- agent_name: Name of the agent using this memory
- batch_size: Number of memories to batch before writing
- auto_save: Whether to automatically save on destruction
- """
-```
-
-#### Methods
-
-##### `add_memory(description: str, metadata: Optional[dict] = None) -> None`
-
-Add a new memory with optional metadata.
-
-**Parameters:**
-- `description` (str): Memory description
-- `metadata` (Optional[dict]): Additional metadata
-
-##### `search(query: str, limit: int = 10) -> List[MemoryItem]`
-
-Search memories using semantic similarity.
-
-**Parameters:**
-- `query` (str): Search query
-- `limit` (int): Maximum results to return
-
-**Returns:**
-- `List[MemoryItem]`: Relevant memories sorted by similarity
-
-##### `flush() -> None`
-
-Manually flush pending writes to persistent storage.
-
-### MemoryItem
-
-Individual memory item with metadata and embeddings.
-
-```python
-class MemoryItem(BaseModel):
- description: str
- metadata: dict
- timestamp: datetime
- embedding: Optional[List[float]] = None
-```
-
-## Skill and Service Management
-
-Skills and services are now managed directly by the MainAgent without a separate Router component.
-
-##### `register_skill(skill: Skill, keywords: List[str]) -> None`
-
-Register a skill with associated keywords for routing.
-
-**Parameters:**
-- `skill` (Skill): Skill instance to register
-- `keywords` (List[str]): Keywords that trigger this skill
-
-##### `register_service(service: Service, keywords: List[str]) -> None`
-
-Register a service with associated keywords for routing.
-
-**Parameters:**
-- `service` (Service): Service instance to register
-- `keywords` (List[str]): Keywords that trigger this service
-
-##### `route(query: str) -> Union[Skill, Service, None]`
-
-Route a query to the most appropriate skill or service.
-
-**Parameters:**
-- `query` (str): User query to route
-
-**Returns:**
-- `Union[Skill, Service, None]`: Best matching handler or None
-
-## Data Models
-
-### QueryResponse
-
-Structured response from agent queries.
-
-```python
-class QueryResponse(BaseModel):
- answers: List[str]
- metadata: dict = Field(default_factory=dict)
- confidence: Optional[float] = None
- sources: List[str] = Field(default_factory=list)
-```
-
-### Message
-
-Base message class for conversation history.
-
-```python
-class Message(BaseModel):
- content: str
- role: str # "human", "assistant", "system"
- timestamp: datetime = Field(default_factory=datetime.now)
- metadata: dict = Field(default_factory=dict)
-```
-
-### HumanMessage
-
-Message from human users.
-
-```python
-class HumanMessage(Message):
- role: str = "human"
-```
-
-### AIMessage
-
-Message from AI agents.
-
-```python
-class AIMessage(Message):
- role: str = "assistant"
-```
-
-### SystemMessage
-
-System-generated messages.
-
-```python
-class SystemMessage(Message):
- role: str = "system"
-```
-
-## Configuration
-
-### AgentConfig
-
-Configuration for agent initialization.
-
-```python
-class AgentConfig(BaseModel):
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- name: str
- model: str = "gpt-5"
- temperature: float = Field(default=0.7, ge=0.0, le=2.0)
- max_tokens: int = Field(default=1000, gt=0)
- memory_enabled: bool = True
- batch_size: int = Field(default=10, gt=0)
-```
-
-### MemoryConfig
-
-Configuration for memory system.
-
-```python
-class MemoryConfig(BaseModel):
- batch_size: int = Field(default=10, gt=0)
- auto_save: bool = True
- max_memories: Optional[int] = None
- embedding_model: str = "text-embedding-ada-002"
-```
-
-## Error Classes
-
-### TalosError
-
-Base exception for all Talos-specific errors.
-
-```python
-class TalosError(Exception):
- """Base exception for all Talos errors."""
- pass
-```
-
-### ValidationError
-
-Raised when input validation fails.
-
-```python
-class ValidationError(TalosError):
- """Raised when input validation fails."""
- pass
-```
-
-### APIError
-
-Raised when external API calls fail.
-
-```python
-class APIError(TalosError):
- """Raised when external API calls fail."""
-
- def __init__(self, message: str, status_code: Optional[int] = None):
- super().__init__(message)
- self.status_code = status_code
-```
-
-### ConfigurationError
-
-Raised when configuration is invalid.
-
-```python
-class ConfigurationError(TalosError):
- """Raised when configuration is invalid."""
- pass
-```
-
-## Usage Examples
-
-### Basic Agent Usage
-
-```python
-from talos.core.agent import Agent
-from talos.core.memory import Memory
-
-# Create agent with custom memory
-memory = Memory(agent_name="example_agent", batch_size=5)
-agent = Agent(name="example_agent", memory=memory)
-
-# Process queries
-response = agent.process_query("What are the latest DeFi trends?")
-print(f"Response: {response.answers[0]}")
-
-# Add memories
-agent.add_memory("User is interested in DeFi trends", {"topic": "defi"})
-
-# Search memories
-memories = agent.search_memory("DeFi", limit=5)
-for memory in memories:
- print(f"Memory: {memory.description}")
-```
-
-### Main Agent Usage
-
-```python
-from talos.core.main_agent import MainAgent
-
-# Create main agent (includes all system components)
-main_agent = MainAgent()
-
-# Process complex queries
-response = main_agent.run("Analyze sentiment for 'yield farming' and recommend APR adjustments")
-print(response.content)
-
-# Interactive conversation
-history = []
-while True:
- user_input = input(">> ")
- if user_input.lower() == 'exit':
- break
-
- response = main_agent.run(user_input, history=history)
- print(response.content)
-
- # History is automatically managed by the agent
-```
-
-### Memory System Usage
-
-```python
-from talos.core.memory import Memory
-
-# Create memory system
-memory = Memory(agent_name="test_agent", batch_size=20)
-
-# Add memories with metadata
-memory.add_memory(
- "Protocol X increased APR to 12% due to market competition",
- {
- "protocol": "Protocol X",
- "action": "apr_increase",
- "value": 0.12,
- "reason": "competition"
- }
-)
-
-# Search for relevant memories
-results = memory.search("APR increase competition", limit=10)
-for result in results:
- print(f"Memory: {result.description}")
- print(f"Metadata: {result.metadata}")
-
-# Manual flush if needed
-memory.flush()
-```
-
-### MainAgent Skill Management
-
-```python
-from talos.core.main_agent import MainAgent
-from talos.skills.proposals import ProposalsSkill
-from talos.skills.twitter_sentiment import TwitterSentimentSkill
-
-# Create main agent (skills are automatically registered)
-agent = MainAgent(model=model, prompts_dir="prompts")
-
-# Skills are managed directly by MainAgent
-# Access skills through agent.skills list
-for skill in agent.skills:
- print(f"Available skill: {skill.name}")
-
-# Use agent to process queries
-result = agent.run("Analyze this governance proposal")
-print(result)
-```
-
-## Error Handling
-
-All core API methods include comprehensive error handling. Always wrap API calls in try-catch blocks:
-
-```python
-from talos.core.agent import Agent
-from talos.core.exceptions import ValidationError, APIError
-
-agent = Agent(name="example")
-
-try:
- response = agent.process_query("What is DeFi?")
- print(response.answers[0])
-except ValidationError as e:
- print(f"Invalid input: {e}")
-except APIError as e:
- print(f"API error: {e}")
- if e.status_code:
- print(f"Status code: {e.status_code}")
-except Exception as e:
- print(f"Unexpected error: {e}")
-```
-
-## Performance Considerations
-
-### Memory Management
-
-- Use batch operations for multiple memory additions
-- Call `flush()` manually for time-sensitive operations
-- Monitor memory usage in long-running processes
-
-### API Rate Limiting
-
-- Implement backoff strategies for API calls
-- Cache responses when appropriate
-- Use connection pooling for external services
-
-### Concurrency
-
-- Core components are thread-safe for read operations
-- Use locks for concurrent write operations
-- Consider async patterns for I/O-bound operations
-
-This API reference provides the foundation for building applications with the Talos core system. For specific integrations and advanced usage patterns, refer to the Skills and Services API documentation.
diff --git a/docs/api/services.md b/docs/api/services.md
deleted file mode 100644
index 49c5b9a6..00000000
--- a/docs/api/services.md
+++ /dev/null
@@ -1,459 +0,0 @@
-# Services API Reference
-
-This document provides detailed API reference for the services layer of the Talos system.
-
-## Base Service Interface
-
-### Service
-
-Abstract base class for all services in the Talos system.
-
-```python
-from abc import ABC, abstractmethod
-
-class Service(ABC):
- @abstractmethod
- def process(self, request: ServiceRequest) -> ServiceResponse:
- """Process a service request and return response."""
- pass
-```
-
-## Service Models
-
-### ServiceRequest
-
-Base request model for all service operations.
-
-```python
-class ServiceRequest(BaseModel):
- request_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- timestamp: datetime = Field(default_factory=datetime.now)
- metadata: dict = Field(default_factory=dict)
-```
-
-### ServiceResponse
-
-Base response model for all service operations.
-
-```python
-class ServiceResponse(BaseModel):
- request_id: str
- success: bool
- data: Any = None
- error: Optional[str] = None
- metadata: dict = Field(default_factory=dict)
- processing_time: Optional[float] = None
-```
-
-## Yield Management Services
-
-### YieldManagerService
-
-Calculates optimal staking APR using market data and sentiment analysis.
-
-```python
-class YieldManagerService(Service):
- def __init__(self, model: str = "gpt-5"):
- """Initialize yield manager service.
-
- Args:
- model: LLM model to use for analysis
- """
-```
-
-#### Methods
-
-##### `process(request: YieldManagerRequest) -> YieldManagerResponse`
-
-Calculate optimal APR based on market conditions and sentiment.
-
-**Parameters:**
-- `request` (YieldManagerRequest): Request containing market data and parameters
-
-**Returns:**
-- `YieldManagerResponse`: Response with recommended APR and analysis
-
-**Example:**
-```python
-from talos.services.yield_manager import YieldManagerService, YieldManagerRequest
-
-service = YieldManagerService()
-request = YieldManagerRequest(
- current_apr=0.05,
- market_volatility=0.15,
- competitor_aprs=[0.06, 0.07, 0.055],
- sentiment_score=0.7,
- tvl=1000000,
- utilization_rate=0.8
-)
-
-response = service.process(request)
-print(f"Recommended APR: {response.recommended_apr}")
-print(f"Reasoning: {response.reasoning}")
-```
-
-### YieldManagerRequest
-
-Request model for yield management operations.
-
-```python
-class YieldManagerRequest(ServiceRequest):
- current_apr: float = Field(ge=0.0, le=1.0)
- market_volatility: float = Field(ge=0.0)
- competitor_aprs: List[float] = Field(default_factory=list)
- sentiment_score: float = Field(ge=-1.0, le=1.0)
- tvl: float = Field(ge=0.0)
- utilization_rate: float = Field(ge=0.0, le=1.0)
- risk_tolerance: str = Field(default="medium") # low, medium, high
-```
-
-### YieldManagerResponse
-
-Response model for yield management operations.
-
-```python
-class YieldManagerResponse(ServiceResponse):
- recommended_apr: float
- confidence: float = Field(ge=0.0, le=1.0)
- reasoning: str
- risk_assessment: str
- market_analysis: dict
- implementation_timeline: str
-```
-
-## Sentiment Analysis Services
-
-### TalosSentimentService
-
-Orchestrates comprehensive sentiment analysis across multiple data sources.
-
-```python
-class TalosSentimentService(Service):
- def __init__(self):
- """Initialize sentiment analysis service."""
-```
-
-#### Methods
-
-##### `process(request: SentimentRequest) -> SentimentResponse`
-
-Perform comprehensive sentiment analysis.
-
-**Parameters:**
-- `request` (SentimentRequest): Request with query and analysis parameters
-
-**Returns:**
-- `SentimentResponse`: Comprehensive sentiment analysis results
-
-**Example:**
-```python
-from talos.services.sentiment import TalosSentimentService, SentimentRequest
-
-service = TalosSentimentService()
-request = SentimentRequest(
- query="DeFi yield farming",
- sources=["twitter", "reddit", "discord"],
- limit=500,
- days_back=7
-)
-
-response = service.process(request)
-print(f"Overall sentiment: {response.overall_sentiment}")
-print(f"Confidence: {response.confidence}")
-for theme in response.key_themes:
- print(f"Theme: {theme.topic} - Sentiment: {theme.sentiment}")
-```
-
-### SentimentRequest
-
-Request model for sentiment analysis operations.
-
-```python
-class SentimentRequest(ServiceRequest):
- query: str = Field(min_length=1, max_length=500)
- sources: List[str] = Field(default=["twitter"])
- limit: int = Field(default=100, ge=1, le=1000)
- days_back: int = Field(default=7, ge=1, le=30)
- language: str = Field(default="en")
- include_influencers: bool = Field(default=True)
-```
-
-### SentimentResponse
-
-Response model for sentiment analysis operations.
-
-```python
-class SentimentResponse(ServiceResponse):
- overall_sentiment: float = Field(ge=-1.0, le=1.0)
- confidence: float = Field(ge=0.0, le=1.0)
- sentiment_distribution: dict # positive, neutral, negative percentages
- key_themes: List[SentimentTheme]
- influential_voices: List[InfluentialVoice]
- trending_hashtags: List[str]
- recommendations: List[str]
- data_sources: dict # source -> count mapping
-```
-
-### SentimentTheme
-
-Individual sentiment theme with associated metrics.
-
-```python
-class SentimentTheme(BaseModel):
- topic: str
- sentiment: float = Field(ge=-1.0, le=1.0)
- mention_count: int = Field(ge=0)
- confidence: float = Field(ge=0.0, le=1.0)
- examples: List[str] = Field(default_factory=list)
-```
-
-### InfluentialVoice
-
-Influential account or voice in sentiment analysis.
-
-```python
-class InfluentialVoice(BaseModel):
- username: str
- platform: str
- follower_count: int = Field(ge=0)
- sentiment: float = Field(ge=-1.0, le=1.0)
- influence_score: float = Field(ge=0.0, le=1.0)
- recent_content: str
-```
-
-## GitHub Services
-
-### GithubService
-
-Handles GitHub operations including repository management and PR reviews.
-
-```python
-class GithubService(Service):
- def __init__(self, token: str):
- """Initialize GitHub service.
-
- Args:
- token: GitHub API token
- """
-```
-
-#### Methods
-
-##### `process(request: GithubRequest) -> GithubResponse`
-
-Process GitHub operations based on request type.
-
-**Parameters:**
-- `request` (GithubRequest): Request specifying the GitHub operation
-
-**Returns:**
-- `GithubResponse`: Response with operation results
-
-**Example:**
-```python
-from talos.services.github import GithubService, GithubRequest
-
-service = GithubService(token="your-github-token")
-
-# Review a pull request
-request = GithubRequest(
- operation="review_pr",
- repository="owner/repo",
- pr_number=123,
- auto_approve=False
-)
-
-response = service.process(request)
-print(f"Security Score: {response.security_score}")
-print(f"Quality Score: {response.quality_score}")
-print(f"Recommendation: {response.recommendation}")
-```
-
-### GithubRequest
-
-Request model for GitHub operations.
-
-```python
-class GithubRequest(ServiceRequest):
- operation: str # "review_pr", "list_prs", "approve_pr", "merge_pr"
- repository: str
- pr_number: Optional[int] = None
- state: str = Field(default="open") # open, closed, all
- auto_approve: bool = Field(default=False)
- post_review: bool = Field(default=False)
-```
-
-### GithubResponse
-
-Response model for GitHub operations.
-
-```python
-class GithubResponse(ServiceResponse):
- operation: str
- repository: str
- pr_number: Optional[int] = None
- security_score: Optional[int] = Field(ge=0, le=100)
- quality_score: Optional[int] = Field(ge=0, le=100)
- recommendation: Optional[str] = None
- detailed_analysis: Optional[str] = None
- pull_requests: Optional[List[dict]] = None
-```
-
-## Usage Examples
-
-### Comprehensive Yield Optimization
-
-```python
-from talos.services.yield_manager import YieldManagerService, YieldManagerRequest
-from talos.services.sentiment import TalosSentimentService, SentimentRequest
-
-# Get sentiment data
-sentiment_service = TalosSentimentService()
-sentiment_request = SentimentRequest(
- query="yield farming APR",
- sources=["twitter", "reddit"],
- limit=200
-)
-sentiment_response = sentiment_service.process(sentiment_request)
-
-# Calculate optimal APR using sentiment data
-yield_service = YieldManagerService()
-yield_request = YieldManagerRequest(
- current_apr=0.05,
- market_volatility=0.12,
- competitor_aprs=[0.06, 0.07, 0.055],
- sentiment_score=sentiment_response.overall_sentiment,
- tvl=2000000,
- utilization_rate=0.75
-)
-yield_response = yield_service.process(yield_request)
-
-print(f"Current sentiment: {sentiment_response.overall_sentiment}")
-print(f"Recommended APR: {yield_response.recommended_apr}")
-print(f"Reasoning: {yield_response.reasoning}")
-```
-
-### Automated PR Review Workflow
-
-```python
-from talos.services.github import GithubService, GithubRequest
-
-service = GithubService(token="your-token")
-
-# List open PRs
-list_request = GithubRequest(
- operation="list_prs",
- repository="your-org/your-repo",
- state="open"
-)
-list_response = service.process(list_request)
-
-# Review each PR
-for pr in list_response.pull_requests:
- review_request = GithubRequest(
- operation="review_pr",
- repository="your-org/your-repo",
- pr_number=pr["number"],
- post_review=True,
- auto_approve=True
- )
-
- review_response = service.process(review_request)
-
- print(f"PR #{pr['number']}: {review_response.recommendation}")
- print(f"Security: {review_response.security_score}/100")
- print(f"Quality: {review_response.quality_score}/100")
-```
-
-### Multi-Source Sentiment Analysis
-
-```python
-from talos.services.sentiment import TalosSentimentService, SentimentRequest
-
-service = TalosSentimentService()
-
-# Analyze sentiment across multiple topics
-topics = ["DeFi protocols", "yield farming", "staking rewards", "governance tokens"]
-results = {}
-
-for topic in topics:
- request = SentimentRequest(
- query=topic,
- sources=["twitter", "reddit"],
- limit=300,
- days_back=7
- )
-
- response = service.process(request)
- results[topic] = {
- "sentiment": response.overall_sentiment,
- "confidence": response.confidence,
- "themes": [theme.topic for theme in response.key_themes[:3]]
- }
-
-# Analyze results
-for topic, data in results.items():
- print(f"{topic}: {data['sentiment']:.2f} (confidence: {data['confidence']:.2f})")
- print(f" Key themes: {', '.join(data['themes'])}")
-```
-
-## Error Handling
-
-Services include comprehensive error handling for various failure scenarios:
-
-```python
-from talos.services.exceptions import ServiceError, APIError, ValidationError
-
-try:
- response = service.process(request)
- if not response.success:
- print(f"Service error: {response.error}")
- else:
- # Process successful response
- print(f"Success: {response.data}")
-
-except ValidationError as e:
- print(f"Invalid request: {e}")
-except APIError as e:
- print(f"External API error: {e}")
-except ServiceError as e:
- print(f"Service processing error: {e}")
-except Exception as e:
- print(f"Unexpected error: {e}")
-```
-
-## Performance Considerations
-
-### Caching
-
-Services implement intelligent caching for frequently accessed data:
-
-```python
-# Sentiment analysis results are cached for 1 hour
-# GitHub repository data is cached for 5 minutes
-# Market data is cached for 30 seconds
-```
-
-### Rate Limiting
-
-All services respect external API rate limits:
-
-- **GitHub API**: 5000 requests per hour
-- **Twitter API**: 300 requests per 15 minutes
-- **OpenAI API**: Varies by plan
-
-### Batch Operations
-
-Services support batch operations for efficiency:
-
-```python
-# Batch sentiment analysis
-batch_request = SentimentBatchRequest(
- queries=["topic1", "topic2", "topic3"],
- sources=["twitter"],
- limit=100
-)
-batch_response = sentiment_service.process_batch(batch_request)
-```
-
-This services API provides the business logic layer for Talos operations, enabling sophisticated protocol management through well-defined interfaces and comprehensive error handling.
diff --git a/docs/api/tools.md b/docs/api/tools.md
deleted file mode 100644
index a00feae9..00000000
--- a/docs/api/tools.md
+++ /dev/null
@@ -1,709 +0,0 @@
-# Tools API Reference
-
-This document provides detailed API reference for the tools layer of the Talos system, which handles external integrations and utilities.
-
-## Base Tool Interface
-
-### BaseTool
-
-Abstract base class for all tools in the Talos system.
-
-```python
-from abc import ABC, abstractmethod
-
-class BaseTool(ABC):
- name: str
- description: str
-
- @abstractmethod
- def execute(self, *args, **kwargs) -> Any:
- """Execute the tool with provided arguments."""
- pass
-```
-
-### SupervisedTool
-
-Wrapper that adds hypervisor approval to any tool.
-
-```python
-class SupervisedTool:
- def __init__(self, base_tool: BaseTool, supervisor: Supervisor):
- """Wrap a tool with supervision.
-
- Args:
- base_tool: The tool to wrap
- supervisor: Supervisor for approval decisions
- """
- self.base_tool = base_tool
- self.supervisor = supervisor
-
- def execute(self, *args, **kwargs) -> Any:
- """Execute tool with supervisor approval."""
-```
-
-## GitHub Tools
-
-### GithubTools
-
-Comprehensive GitHub API integration for repository management.
-
-```python
-class GithubTools:
- def __init__(self, token: str):
- """Initialize GitHub tools.
-
- Args:
- token: GitHub API token with appropriate permissions
- """
-```
-
-#### Repository Operations
-
-##### `get_all_pull_requests(user: str, project: str, state: str = "open") -> List[dict]`
-
-Retrieve pull requests from a repository.
-
-**Parameters:**
-- `user` (str): Repository owner
-- `project` (str): Repository name
-- `state` (str): PR state - "open", "closed", or "all"
-
-**Returns:**
-- `List[dict]`: List of pull request data
-
-**Example:**
-```python
-from talos.tools.github import GithubTools
-
-github = GithubTools(token="your-token")
-prs = github.get_all_pull_requests("microsoft", "vscode", state="open")
-
-for pr in prs:
- print(f"PR #{pr['number']}: {pr['title']}")
- print(f"Author: {pr['user']['login']}")
- print(f"State: {pr['state']}")
-```
-
-##### `get_open_issues(user: str, project: str) -> List[dict]`
-
-Retrieve open issues from a repository.
-
-**Parameters:**
-- `user` (str): Repository owner
-- `project` (str): Repository name
-
-**Returns:**
-- `List[dict]`: List of issue data
-
-##### `review_pull_request(user: str, project: str, pr_number: int) -> dict`
-
-Perform AI-powered review of a pull request.
-
-**Parameters:**
-- `user` (str): Repository owner
-- `project` (str): Repository name
-- `pr_number` (int): Pull request number
-
-**Returns:**
-- `dict`: Review analysis with security and quality scores
-
-**Example:**
-```python
-review = github.review_pull_request("owner", "repo", 123)
-print(f"Security Score: {review['security_score']}/100")
-print(f"Quality Score: {review['quality_score']}/100")
-print(f"Recommendation: {review['recommendation']}")
-print(f"Analysis: {review['detailed_analysis']}")
-```
-
-##### `approve_pull_request(user: str, project: str, pr_number: int) -> bool`
-
-Approve a pull request.
-
-**Parameters:**
-- `user` (str): Repository owner
-- `project` (str): Repository name
-- `pr_number` (int): Pull request number
-
-**Returns:**
-- `bool`: True if approval was successful
-
-##### `merge_pull_request(user: str, project: str, pr_number: int) -> bool`
-
-Merge an approved pull request.
-
-**Parameters:**
-- `user` (str): Repository owner
-- `project` (str): Repository name
-- `pr_number` (int): Pull request number
-
-**Returns:**
-- `bool`: True if merge was successful
-
-## Twitter Tools
-
-### TwitterTools
-
-Twitter API integration for social media monitoring and analysis.
-
-```python
-class TwitterTools:
- def __init__(self, bearer_token: str):
- """Initialize Twitter tools.
-
- Args:
- bearer_token: Twitter API Bearer Token
- """
-```
-
-#### Search and Analysis
-
-##### `search_tweets(query: str, limit: int = 100) -> List[dict]`
-
-Search for tweets matching a query.
-
-**Parameters:**
-- `query` (str): Search query
-- `limit` (int): Maximum number of tweets to return
-
-**Returns:**
-- `List[dict]`: List of tweet data
-
-**Example:**
-```python
-from talos.tools.twitter import TwitterTools
-
-twitter = TwitterTools(bearer_token="your-token")
-tweets = twitter.search_tweets("DeFi yield farming", limit=50)
-
-for tweet in tweets:
- print(f"@{tweet['author']['username']}: {tweet['text']}")
- print(f"Likes: {tweet['public_metrics']['like_count']}")
-```
-
-##### `get_user_tweets(username: str, limit: int = 100) -> List[dict]`
-
-Get recent tweets from a specific user.
-
-**Parameters:**
-- `username` (str): Twitter username (without @)
-- `limit` (int): Maximum number of tweets to return
-
-**Returns:**
-- `List[dict]`: List of tweet data
-
-##### `analyze_sentiment(tweets: List[dict]) -> dict`
-
-Analyze sentiment of a collection of tweets.
-
-**Parameters:**
-- `tweets` (List[dict]): List of tweet data
-
-**Returns:**
-- `dict`: Sentiment analysis results
-
-**Example:**
-```python
-tweets = twitter.search_tweets("protocol governance", limit=200)
-sentiment = twitter.analyze_sentiment(tweets)
-
-print(f"Overall sentiment: {sentiment['overall_sentiment']}")
-print(f"Positive: {sentiment['positive_ratio']:.1%}")
-print(f"Negative: {sentiment['negative_ratio']:.1%}")
-print(f"Neutral: {sentiment['neutral_ratio']:.1%}")
-```
-
-##### `get_trending_topics(location: str = "worldwide") -> List[str]`
-
-Get trending topics for a location.
-
-**Parameters:**
-- `location` (str): Location for trends (default: "worldwide")
-
-**Returns:**
-- `List[str]`: List of trending topics
-
-#### User Analysis
-
-##### `analyze_user_influence(username: str) -> dict`
-
-Analyze a user's influence and credibility.
-
-**Parameters:**
-- `username` (str): Twitter username (without @)
-
-**Returns:**
-- `dict`: User influence analysis
-
-**Example:**
-```python
-influence = twitter.analyze_user_influence("vitalikbuterin")
-print(f"Influence Score: {influence['influence_score']}/100")
-print(f"Follower Quality: {influence['follower_quality']}")
-print(f"Engagement Rate: {influence['engagement_rate']:.2%}")
-print(f"Expertise Areas: {', '.join(influence['expertise_areas'])}")
-```
-
-## IPFS Tools
-
-### IPFSTools
-
-IPFS integration for decentralized storage operations.
-
-```python
-class IPFSTools:
- def __init__(self, api_key: str, secret_key: str):
- """Initialize IPFS tools.
-
- Args:
- api_key: Pinata API key
- secret_key: Pinata secret key
- """
-```
-
-#### Storage Operations
-
-##### `upload_json(data: dict, name: str) -> str`
-
-Upload JSON data to IPFS.
-
-**Parameters:**
-- `data` (dict): JSON data to upload
-- `name` (str): Name for the uploaded content
-
-**Returns:**
-- `str`: IPFS hash of uploaded content
-
-**Example:**
-```python
-from talos.tools.ipfs import IPFSTools
-
-ipfs = IPFSTools(api_key="your-key", secret_key="your-secret")
-
-proposal_data = {
- "title": "Increase Staking Rewards",
- "description": "Proposal to increase staking rewards from 5% to 8%",
- "voting_period": "7 days",
- "created_at": "2024-01-15T10:00:00Z"
-}
-
-ipfs_hash = ipfs.upload_json(proposal_data, "governance-proposal-001")
-print(f"Proposal uploaded to IPFS: {ipfs_hash}")
-print(f"Access URL: https://gateway.pinata.cloud/ipfs/{ipfs_hash}")
-```
-
-##### `upload_text(content: str, name: str) -> str`
-
-Upload text content to IPFS.
-
-**Parameters:**
-- `content` (str): Text content to upload
-- `name` (str): Name for the uploaded content
-
-**Returns:**
-- `str`: IPFS hash of uploaded content
-
-##### `retrieve_content(ipfs_hash: str) -> str`
-
-Retrieve content from IPFS by hash.
-
-**Parameters:**
-- `ipfs_hash` (str): IPFS hash of content to retrieve
-
-**Returns:**
-- `str`: Retrieved content
-
-##### `pin_content(ipfs_hash: str) -> bool`
-
-Pin content to ensure it remains available.
-
-**Parameters:**
-- `ipfs_hash` (str): IPFS hash to pin
-
-**Returns:**
-- `bool`: True if pinning was successful
-
-## Cryptography Tools
-
-### CryptographyTools
-
-RSA encryption and decryption operations.
-
-```python
-class CryptographyTools:
- def __init__(self, key_dir: str = ".keys"):
- """Initialize cryptography tools.
-
- Args:
- key_dir: Directory containing RSA keys
- """
-```
-
-#### Key Management
-
-##### `generate_key_pair(key_size: int = 2048) -> Tuple[str, str]`
-
-Generate RSA key pair.
-
-**Parameters:**
-- `key_size` (int): Key size in bits (1024, 2048, or 4096)
-
-**Returns:**
-- `Tuple[str, str]`: (private_key_path, public_key_path)
-
-**Example:**
-```python
-from talos.tools.crypto import CryptographyTools
-
-crypto = CryptographyTools()
-private_key, public_key = crypto.generate_key_pair(key_size=2048)
-print(f"Keys generated: {private_key}, {public_key}")
-```
-
-##### `get_public_key() -> str`
-
-Get the current public key.
-
-**Returns:**
-- `str`: Public key in PEM format
-
-##### `get_key_fingerprint() -> str`
-
-Get fingerprint of the current key pair.
-
-**Returns:**
-- `str`: SHA256 fingerprint of the public key
-
-#### Encryption Operations
-
-##### `encrypt_data(data: str, public_key_path: str) -> str`
-
-Encrypt data using RSA public key.
-
-**Parameters:**
-- `data` (str): Data to encrypt
-- `public_key_path` (str): Path to public key file
-
-**Returns:**
-- `str`: Base64-encoded encrypted data
-
-**Example:**
-```python
-# Encrypt sensitive data
-encrypted = crypto.encrypt_data(
- "Secret protocol configuration",
- "recipient_public_key.pem"
-)
-print(f"Encrypted data: {encrypted}")
-```
-
-##### `decrypt_data(encrypted_data: str) -> str`
-
-Decrypt data using RSA private key.
-
-**Parameters:**
-- `encrypted_data` (str): Base64-encoded encrypted data
-
-**Returns:**
-- `str`: Decrypted plaintext data
-
-**Example:**
-```python
-# Decrypt received data
-decrypted = crypto.decrypt_data(encrypted_data)
-print(f"Decrypted: {decrypted}")
-```
-
-##### `sign_data(data: str) -> str`
-
-Create digital signature for data.
-
-**Parameters:**
-- `data` (str): Data to sign
-
-**Returns:**
-- `str`: Base64-encoded signature
-
-##### `verify_signature(data: str, signature: str, public_key_path: str) -> bool`
-
-Verify digital signature.
-
-**Parameters:**
-- `data` (str): Original data
-- `signature` (str): Base64-encoded signature
-- `public_key_path` (str): Path to public key file
-
-**Returns:**
-- `bool`: True if signature is valid
-
-## Tool Manager
-
-### ToolManager
-
-Central registry and manager for all tools.
-
-```python
-class ToolManager:
- def __init__(self):
- """Initialize tool manager."""
- self.tools: Dict[str, BaseTool] = {}
- self.supervised_tools: Dict[str, SupervisedTool] = {}
-```
-
-#### Tool Registration
-
-##### `register_tool(tool: BaseTool) -> None`
-
-Register a tool with the manager.
-
-**Parameters:**
-- `tool` (BaseTool): Tool to register
-
-##### `register_supervised_tool(tool: BaseTool, supervisor: Supervisor) -> None`
-
-Register a tool with supervision.
-
-**Parameters:**
-- `tool` (BaseTool): Tool to register
-- `supervisor` (Supervisor): Supervisor for approval
-
-##### `get_tool(name: str) -> Optional[BaseTool]`
-
-Get a registered tool by name.
-
-**Parameters:**
-- `name` (str): Tool name
-
-**Returns:**
-- `Optional[BaseTool]`: Tool instance or None
-
-**Example:**
-```python
-from talos.core.tool_manager import ToolManager
-from talos.tools.github import GithubTools
-from talos.hypervisor.supervisor import RuleBasedSupervisor
-
-# Create tool manager
-tool_manager = ToolManager()
-
-# Register tools
-github_tool = GithubTools(token="your-token")
-supervisor = RuleBasedSupervisor()
-
-tool_manager.register_supervised_tool(github_tool, supervisor)
-
-# Use tools
-github = tool_manager.get_tool("github")
-if github:
- prs = github.get_all_pull_requests("owner", "repo")
-```
-
-## Usage Examples
-
-### Comprehensive GitHub Workflow
-
-```python
-from talos.tools.github import GithubTools
-from talos.hypervisor.supervisor import RuleBasedSupervisor
-from talos.core.tool_manager import SupervisedTool
-
-# Set up supervised GitHub operations
-github = GithubTools(token="your-token")
-supervisor = RuleBasedSupervisor()
-supervised_github = SupervisedTool(github, supervisor)
-
-# Automated PR review workflow
-def review_repository_prs(owner: str, repo: str):
- # Get all open PRs
- prs = supervised_github.execute("get_all_pull_requests", owner, repo, "open")
-
- for pr in prs:
- print(f"Reviewing PR #{pr['number']}: {pr['title']}")
-
- # Perform AI review
- review = supervised_github.execute("review_pull_request", owner, repo, pr['number'])
-
- print(f"Security Score: {review['security_score']}/100")
- print(f"Quality Score: {review['quality_score']}/100")
-
- # Auto-approve if criteria are met
- if review['security_score'] >= 85 and review['quality_score'] >= 90:
- supervised_github.execute("approve_pull_request", owner, repo, pr['number'])
- print(f"PR #{pr['number']} approved automatically")
- else:
- print(f"PR #{pr['number']} requires manual review")
-
-review_repository_prs("your-org", "your-repo")
-```
-
-### Social Media Sentiment Pipeline
-
-```python
-from talos.tools.twitter import TwitterTools
-from talos.tools.ipfs import IPFSTools
-
-# Set up tools
-twitter = TwitterTools(bearer_token="your-token")
-ipfs = IPFSTools(api_key="your-key", secret_key="your-secret")
-
-def analyze_protocol_sentiment(protocol_name: str):
- # Collect tweets
- tweets = twitter.search_tweets(f"{protocol_name} protocol", limit=500)
-
- # Analyze sentiment
- sentiment = twitter.analyze_sentiment(tweets)
-
- # Identify influential voices
- influential_users = []
- for tweet in tweets[:50]: # Top 50 tweets
- if tweet['author']['public_metrics']['followers_count'] > 10000:
- influence = twitter.analyze_user_influence(tweet['author']['username'])
- influential_users.append(influence)
-
- # Compile report
- report = {
- "protocol": protocol_name,
- "analysis_date": datetime.now().isoformat(),
- "tweet_count": len(tweets),
- "sentiment": sentiment,
- "influential_voices": influential_users[:10], # Top 10
- "recommendations": generate_recommendations(sentiment)
- }
-
- # Store report on IPFS
- ipfs_hash = ipfs.upload_json(report, f"{protocol_name}-sentiment-report")
- print(f"Sentiment report stored: https://gateway.pinata.cloud/ipfs/{ipfs_hash}")
-
- return report
-
-def generate_recommendations(sentiment_data):
- recommendations = []
-
- if sentiment_data['overall_sentiment'] < -0.3:
- recommendations.append("Consider community engagement to address concerns")
- recommendations.append("Review recent protocol changes for negative impact")
-
- if sentiment_data['positive_ratio'] > 0.7:
- recommendations.append("Leverage positive sentiment for marketing campaigns")
- recommendations.append("Consider expanding successful initiatives")
-
- return recommendations
-
-# Analyze multiple protocols
-protocols = ["Compound", "Aave", "Uniswap"]
-for protocol in protocols:
- report = analyze_protocol_sentiment(protocol)
- print(f"{protocol} overall sentiment: {report['sentiment']['overall_sentiment']:.2f}")
-```
-
-### Secure Data Management
-
-```python
-from talos.tools.crypto import CryptographyTools
-from talos.tools.ipfs import IPFSTools
-
-# Set up tools
-crypto = CryptographyTools()
-ipfs = IPFSTools(api_key="your-key", secret_key="your-secret")
-
-def secure_proposal_storage(proposal_data: dict):
- # Generate keys if needed
- if not crypto.get_public_key():
- private_key, public_key = crypto.generate_key_pair(2048)
- print(f"Generated new key pair: {public_key}")
-
- # Encrypt sensitive data
- sensitive_fields = ["financial_impact", "implementation_details"]
- encrypted_data = proposal_data.copy()
-
- for field in sensitive_fields:
- if field in encrypted_data:
- encrypted_value = crypto.encrypt_data(
- str(encrypted_data[field]),
- crypto.get_public_key()
- )
- encrypted_data[f"{field}_encrypted"] = encrypted_value
- del encrypted_data[field]
-
- # Sign the proposal
- proposal_text = json.dumps(encrypted_data, sort_keys=True)
- signature = crypto.sign_data(proposal_text)
- encrypted_data["signature"] = signature
-
- # Store on IPFS
- ipfs_hash = ipfs.upload_json(encrypted_data, "secure-proposal")
-
- return {
- "ipfs_hash": ipfs_hash,
- "public_key_fingerprint": crypto.get_key_fingerprint(),
- "encrypted_fields": sensitive_fields
- }
-
-# Example usage
-proposal = {
- "title": "Treasury Rebalancing Proposal",
- "description": "Proposal to rebalance treasury allocation",
- "financial_impact": {"amount": 1000000, "risk_level": "medium"},
- "implementation_details": {"timeline": "30 days", "steps": ["step1", "step2"]},
- "voting_period": "7 days"
-}
-
-result = secure_proposal_storage(proposal)
-print(f"Secure proposal stored: {result['ipfs_hash']}")
-```
-
-## Error Handling
-
-All tools include comprehensive error handling:
-
-```python
-from talos.tools.exceptions import ToolError, APIError, AuthenticationError
-
-try:
- result = tool.execute(*args, **kwargs)
-except AuthenticationError as e:
- print(f"Authentication failed: {e}")
- # Handle token refresh or re-authentication
-except APIError as e:
- print(f"API error: {e}")
- if e.status_code == 429:
- # Handle rate limiting
- time.sleep(60)
- result = tool.execute(*args, **kwargs)
-except ToolError as e:
- print(f"Tool execution error: {e}")
-except Exception as e:
- print(f"Unexpected error: {e}")
-```
-
-## Performance Considerations
-
-### Rate Limiting
-
-Tools automatically handle API rate limits:
-- **GitHub**: 5000 requests/hour, automatic backoff
-- **Twitter**: 300 requests/15 minutes, intelligent queuing
-- **IPFS**: No strict limits, but connection pooling used
-
-### Caching
-
-Tools implement intelligent caching:
-- **Repository data**: 5-minute cache
-- **User profiles**: 1-hour cache
-- **Sentiment analysis**: 30-minute cache for same queries
-
-### Batch Operations
-
-Many tools support batch operations for efficiency:
-
-```python
-# Batch GitHub operations
-prs_to_review = [123, 124, 125, 126]
-reviews = github.batch_review_pull_requests("owner", "repo", prs_to_review)
-
-# Batch Twitter analysis
-queries = ["DeFi", "yield farming", "staking", "governance"]
-sentiment_results = twitter.batch_sentiment_analysis(queries, limit=100)
-```
-
-This tools API provides the external integration layer for Talos, enabling sophisticated interactions with GitHub, Twitter, IPFS, and cryptographic operations while maintaining security through supervised execution.
diff --git a/docs/architecture/agents.md b/docs/architecture/agents.md
deleted file mode 100644
index c3a79585..00000000
--- a/docs/architecture/agents.md
+++ /dev/null
@@ -1,211 +0,0 @@
-# Agent System
-
-The agent system in Talos provides the foundation for AI-driven protocol management through a hierarchical architecture of specialized agents.
-
-## Agent Hierarchy
-
-### MainAgent
-
-The `MainAgent` serves as the top-level orchestrator that integrates all system components:
-
-```python
-class MainAgent:
- def __init__(self):
- self.skills = []
- self.services = []
- self.hypervisor = Hypervisor()
- self.tool_manager = ToolManager()
- self.memory = Memory()
-```
-
-**Key Responsibilities:**
-- **Query Routing** - Directs user queries to appropriate skills/services
-- **Action Supervision** - Ensures all actions pass through hypervisor approval
-- **Tool Coordination** - Manages available tools and their registration
-- **Memory Management** - Maintains persistent conversation history
-- **Skill Integration** - Orchestrates multiple skills for complex tasks
-
-**Workflow:**
-1. Receives user input
-2. Routes query directly to appropriate skill/service
-3. Executes actions through SupervisedTool wrappers
-4. Stores results in Memory for future reference
-5. Returns structured response to user
-
-### Base Agent
-
-The `Agent` class provides core functionality inherited by all specialized agents:
-
-**Core Features:**
-- **LLM Interaction** - Standardized interface to language models (default: GPT-5)
-- **Conversation History** - Maintains context across interactions using message history
-- **Memory Integration** - Semantic search and retrieval of past conversations
-- **Prompt Management** - Template-based prompt system with dynamic loading
-
-**Implementation Details:**
-```python
-class Agent:
- def __init__(self, model: str = "gpt-5"):
- self.model = model
- self.history = []
- self.memory = Memory()
- self.prompt_manager = PromptManager()
-```
-
-## Specialized Agents
-
-### GitHub PR Review Agent
-
-Specialized agent for automated code review:
-
-**Capabilities:**
-- **Code Analysis** - Reviews pull requests for quality and security
-- **Security Scoring** - Assigns security scores (0-100) based on code analysis
-- **Quality Assessment** - Evaluates code quality and adherence to standards
-- **Automated Feedback** - Generates detailed review comments
-- **Approval Workflow** - Can automatically approve PRs meeting criteria
-
-**Integration:**
-- Uses `GithubService` for repository operations
-- Leverages `GithubTools` for API interactions
-- Supervised through hypervisor for all actions
-
-### Sentiment Analysis Agents
-
-Specialized agents for social media and community sentiment:
-
-**Twitter Sentiment Agent:**
-- Analyzes tweet sentiment for specific queries
-- Tracks trending topics and community discussions
-- Evaluates account influence and credibility
-- Generates sentiment reports with scoring
-
-**Community Monitoring Agent:**
-- Monitors multiple social platforms
-- Aggregates sentiment across channels
-- Identifies emerging trends and concerns
-- Provides actionable insights for protocol decisions
-
-## Agent Communication
-
-### Message System
-
-Agents communicate through a standardized message system:
-
-```python
-class Message:
- content: str
- role: str # "human", "assistant", "system"
- metadata: dict
- timestamp: datetime
-```
-
-**Message Types:**
-- **HumanMessage** - User input and queries
-- **AIMessage** - Agent responses and analysis
-- **SystemMessage** - Internal system communications
-
-### History Management
-
-**Conversation Persistence:**
-- All interactions stored in persistent memory
-- Semantic search enables context retrieval
-- Message history maintains conversation flow
-- Metadata enables filtering and categorization
-
-**Memory Integration:**
-- Vector embeddings for semantic similarity
-- FAISS backend for efficient search
-- Batch operations for performance optimization
-- Automatic memory consolidation
-
-## Agent Lifecycle
-
-### Initialization
-
-1. **Configuration Loading** - Load agent-specific settings
-2. **Tool Registration** - Register available tools with ToolManager
-3. **Memory Initialization** - Load persistent memory and history
-4. **Prompt Loading** - Load prompt templates from files
-5. **Service Integration** - Connect to external services (GitHub, Twitter, etc.)
-
-### Execution Cycle
-
-1. **Input Processing** - Parse and validate user input
-2. **Context Retrieval** - Search memory for relevant context
-3. **Skill Selection** - Route query to appropriate skill/service
-4. **Action Planning** - Generate execution plan for complex tasks
-5. **Supervised Execution** - Execute actions through hypervisor approval
-6. **Result Processing** - Format and store results
-7. **Response Generation** - Generate user-facing response
-
-### Shutdown
-
-1. **Memory Persistence** - Save conversation history and memories
-2. **Tool Cleanup** - Properly close external connections
-3. **State Serialization** - Save agent state for future sessions
-
-## Agent Configuration
-
-### Model Selection
-
-Agents can be configured with different LLM models:
-
-```python
-# Default configuration
-agent = Agent(model="gpt-5")
-
-# Custom model for specific tasks
-code_review_agent = Agent(model="gpt-4o-code")
-```
-
-### Prompt Customization
-
-Agents use template-based prompts that can be customized:
-
-```json
-{
- "system_prompt": "You are Talos, an AI protocol owner...",
- "task_prompts": {
- "proposal_evaluation": "Analyze the following proposal...",
- "sentiment_analysis": "Evaluate the sentiment of..."
- }
-}
-```
-
-### Memory Configuration
-
-Memory system can be tuned for different use cases:
-
-```python
-memory = Memory(
- batch_size=10, # Batch writes for performance
- auto_save=True, # Automatic persistence
- max_memories=1000 # Memory limit
-)
-```
-
-## Best Practices
-
-### Agent Design
-
-- **Single Responsibility** - Each agent should have a clear, focused purpose
-- **Stateless Operations** - Minimize agent state for reliability
-- **Error Handling** - Robust error handling and recovery
-- **Logging** - Comprehensive logging for debugging and monitoring
-
-### Performance Optimization
-
-- **Batch Operations** - Use batch processing for memory operations
-- **Caching** - Cache frequently accessed data (prompts, configurations)
-- **Lazy Loading** - Load resources only when needed
-- **Connection Pooling** - Reuse connections to external services
-
-### Security Considerations
-
-- **Input Validation** - Validate all user inputs
-- **Supervised Execution** - All actions must pass hypervisor approval
-- **Audit Trails** - Maintain logs of all agent actions
-- **Access Control** - Implement proper permissions for external services
-
-This agent system provides the foundation for Talos's autonomous protocol management capabilities while maintaining security and reliability through supervised execution.
diff --git a/docs/architecture/components.md b/docs/architecture/components.md
deleted file mode 100644
index e53a760f..00000000
--- a/docs/architecture/components.md
+++ /dev/null
@@ -1,163 +0,0 @@
-# Core Components
-
-Talos is comprised of several key components that allow it to function as a decentralized AI protocol owner.
-
-## System Architecture
-
-The codebase follows a layered architecture with clear separation of concerns:
-
-```
-src/talos/
-├── core/ # Core agent system and orchestration
-├── skills/ # Modular capabilities (sentiment analysis, proposals, etc.)
-├── services/ # Business logic implementations
-├── tools/ # External API integrations and utilities
-├── hypervisor/ # Action supervision and approval system
-├── prompts/ # LLM prompt templates and management
-├── cli/ # Command-line interface
-├── data/ # Data management and vector storage
-├── models/ # Pydantic data models
-└── utils/ # Utility functions and clients
-```
-
-## Core Components
-
-### Hypervisor and Supervisor
-
-The **Hypervisor** is the core of Talos's governance capabilities. It monitors all actions and uses a Supervisor to approve or deny them based on a set of rules and the agent's history. This protects the protocol from malicious or erroneous actions.
-
-**Key Features:**
-- Monitors all agent actions in real-time
-- Rule-based approval/denial system
-- Maintains audit trails of all decisions
-- Integrates with LLM prompts for complex decision making
-- Supports multiple supervisor implementations
-
-**Components:**
-- `Hypervisor` - Main monitoring and coordination system
-- `Supervisor` - Abstract interface for approval logic
-- `RuleBasedSupervisor` - Concrete implementation with configurable rules
-
-### Proposal Evaluation System
-
-Talos can systematically evaluate governance proposals, providing detailed analysis to help stakeholders make informed decisions.
-
-**Capabilities:**
-- LLM-based proposal analysis
-- Risk assessment and scoring
-- Community feedback integration
-- Recommendation generation with reasoning
-- Historical proposal tracking
-
-**Implementation:**
-- `ProposalsSkill` - Main proposal evaluation logic
-- Integration with external data sources
-- Structured output with scoring metrics
-
-### Tool-Based Architecture
-
-Talos uses a variety of tools to interact with external services like Twitter, GitHub, and GitBook, allowing it to perform a wide range of tasks.
-
-**Tool Management:**
-- `ToolManager` - Central registry for all available tools
-- `SupervisedTool` - Wrapper that adds approval workflow to any tool
-- Dynamic tool discovery and registration
-- Extensible architecture for new integrations
-
-**Available Tools:**
-- **GitHub Tools** - Repository management, PR reviews, issue tracking
-- **Twitter Tools** - Social media monitoring, sentiment analysis, posting
-- **IPFS Tools** - Decentralized storage and content management
-- **Cryptography Tools** - Key management, encryption/decryption
-
-## Agent System
-
-### Main Agent
-
-The `MainAgent` serves as the top-level orchestrator that integrates all system components:
-
-- **Direct Skill/Service Management** - Manages skills and services directly
-- **Hypervisor Integration** - Ensures all actions are supervised
-- **Tool Management** - Manages available tools and their registration
-- **Memory System** - Persistent conversation history and semantic search
-- **Skill Coordination** - Orchestrates multiple skills for complex tasks
-
-### Base Agent
-
-The `Agent` class provides core functionality for all AI agents:
-
-- **LLM Interaction** - Standardized interface to language models
-- **Conversation History** - Maintains context across interactions
-- **Memory Management** - Semantic search and retrieval
-- **Prompt Management** - Template-based prompt system
-
-## Data Management
-
-### Memory System
-
-Persistent storage with semantic search capabilities:
-
-- **FAISS Integration** - Vector similarity search
-- **Conversation History** - Maintains context across sessions
-- **Metadata Support** - Rich tagging and filtering
-- **Batch Operations** - Optimized for performance
-
-### Dataset Management
-
-Manages textual datasets with vector embeddings:
-
-- **Vector Embeddings** - Semantic similarity search
-- **FAISS Backend** - Efficient similarity queries
-- **Batch Processing** - Optimized for large datasets
-- **Metadata Integration** - Rich content tagging
-
-## External Integrations
-
-### GitHub Integration
-
-Comprehensive GitHub API integration:
-
-- **Repository Operations** - Clone, fork, branch management
-- **Pull Request Management** - Review, approve, merge workflows
-- **Issue Tracking** - Create, update, close issues
-- **Code Review** - AI-powered code analysis and feedback
-
-### Twitter Integration
-
-Social media monitoring and engagement:
-
-- **Content Analysis** - Sentiment analysis and trend detection
-- **Account Evaluation** - Influence scoring and verification
-- **Automated Posting** - Scheduled and reactive content
-- **Community Monitoring** - Real-time sentiment tracking
-
-### IPFS Integration
-
-Decentralized storage capabilities:
-
-- **Content Storage** - Immutable content addressing
-- **Metadata Management** - Rich content descriptions
-- **Pinning Services** - Reliable content availability
-- **Gateway Integration** - HTTP access to IPFS content
-
-## Configuration and Extensibility
-
-### Prompt Management
-
-Template-based prompt system:
-
-- **File-based Templates** - JSON prompt definitions
-- **Dynamic Loading** - Runtime prompt updates
-- **Concatenation Support** - Modular prompt composition
-- **Version Control** - Track prompt changes over time
-
-### Skill System
-
-Modular capability architecture:
-
-- **Abstract Base Classes** - Standardized skill interface
-- **Dynamic Registration** - Runtime skill discovery
-- **Parameter Validation** - Type-safe skill execution
-- **Result Standardization** - Consistent output formats
-
-This architecture enables Talos to operate as a sophisticated AI protocol owner while maintaining security, extensibility, and reliability.
diff --git a/docs/architecture/hypervisor.md b/docs/architecture/hypervisor.md
deleted file mode 100644
index becc5644..00000000
--- a/docs/architecture/hypervisor.md
+++ /dev/null
@@ -1,274 +0,0 @@
-# Hypervisor System
-
-The Hypervisor is the core security and governance component of Talos, responsible for monitoring and approving all agent actions to ensure protocol safety and integrity.
-
-## Overview
-
-The Hypervisor system implements a multi-layered approval mechanism that validates all agent actions before execution. This prevents malicious or erroneous actions from affecting the protocol while maintaining autonomous operation.
-
-## Architecture
-
-### Core Components
-
-```python
-class Hypervisor:
- def __init__(self):
- self.supervisor = RuleBasedSupervisor()
- self.action_log = ActionLog()
- self.prompt_manager = PromptManager()
-```
-
-**Key Components:**
-- **Hypervisor** - Main coordination and monitoring system
-- **Supervisor** - Abstract interface for approval logic
-- **ActionLog** - Audit trail of all actions and decisions
-- **PromptManager** - LLM prompts for complex decision making
-
-### Supervisor Interface
-
-The `Supervisor` provides an abstract interface for different approval strategies:
-
-```python
-class Supervisor:
- def approve_action(self, action: Action, context: dict) -> ApprovalResult:
- """Approve or deny an action based on rules and context"""
- pass
-```
-
-**Implementations:**
-- **RuleBasedSupervisor** - Uses predefined rules for approval
-- **LLMSupervisor** - Uses language models for complex decisions
-- **HybridSupervisor** - Combines rule-based and LLM approaches
-
-## Approval Workflow
-
-### Action Submission
-
-1. **Action Creation** - Agent creates an action request
-2. **Context Gathering** - Collect relevant context (history, rules, metadata)
-3. **Supervisor Evaluation** - Submit to appropriate supervisor
-4. **Decision Recording** - Log approval/denial with reasoning
-5. **Action Execution** - Execute if approved, block if denied
-
-### Approval Criteria
-
-**Rule-Based Criteria:**
-- **Whitelist/Blacklist** - Allowed/forbidden actions
-- **Rate Limiting** - Maximum actions per time period
-- **Resource Limits** - CPU, memory, network usage constraints
-- **Permission Checks** - Required permissions for specific actions
-
-**LLM-Based Criteria:**
-- **Intent Analysis** - Understand the purpose of the action
-- **Risk Assessment** - Evaluate potential negative consequences
-- **Protocol Alignment** - Ensure actions align with protocol goals
-- **Context Appropriateness** - Verify action fits current situation
-
-## SupervisedTool System
-
-### Tool Wrapping
-
-All external tools are wrapped with supervision:
-
-```python
-class SupervisedTool:
- def __init__(self, base_tool: BaseTool, supervisor: Supervisor):
- self.base_tool = base_tool
- self.supervisor = supervisor
-
- def execute(self, *args, **kwargs):
- action = Action(tool=self.base_tool, args=args, kwargs=kwargs)
- approval = self.supervisor.approve_action(action)
-
- if approval.approved:
- return self.base_tool.execute(*args, **kwargs)
- else:
- raise ActionDeniedException(approval.reason)
-```
-
-**Benefits:**
-- **Transparent Integration** - No changes required to existing tools
-- **Consistent Approval** - All tools use same approval mechanism
-- **Audit Trail** - All tool usage is logged and tracked
-- **Flexible Policies** - Different approval rules per tool type
-
-### Tool Categories
-
-**High-Risk Tools:**
-- **GitHub Operations** - Code changes, repository management
-- **Financial Operations** - Treasury management, token transfers
-- **System Operations** - Configuration changes, service restarts
-
-**Medium-Risk Tools:**
-- **Social Media** - Twitter posting, community engagement
-- **Data Operations** - Database queries, file operations
-- **Communication** - Email, notifications, alerts
-
-**Low-Risk Tools:**
-- **Read Operations** - Data retrieval, status checks
-- **Analysis Tools** - Sentiment analysis, data processing
-- **Reporting Tools** - Log generation, metrics collection
-
-## Rule Configuration
-
-### Rule Definition
-
-Rules are defined in JSON configuration files:
-
-```json
-{
- "rules": [
- {
- "name": "github_pr_approval",
- "condition": {
- "tool": "github",
- "action": "approve_pr",
- "max_per_hour": 5
- },
- "approval": "require_review"
- },
- {
- "name": "twitter_posting",
- "condition": {
- "tool": "twitter",
- "action": "post_tweet"
- },
- "approval": "auto_approve",
- "filters": ["content_moderation", "brand_guidelines"]
- }
- ]
-}
-```
-
-**Rule Components:**
-- **Condition** - When the rule applies
-- **Approval** - Approval strategy (auto, deny, require_review)
-- **Filters** - Additional validation steps
-- **Metadata** - Rule description and documentation
-
-### Dynamic Rule Updates
-
-Rules can be updated dynamically without system restart:
-
-- **Hot Reloading** - Rules reloaded from configuration files
-- **Version Control** - Track rule changes over time
-- **Rollback Support** - Revert to previous rule versions
-- **A/B Testing** - Test new rules with subset of actions
-
-## Monitoring and Alerting
-
-### Action Monitoring
-
-**Real-time Monitoring:**
-- **Action Queue** - Track pending approvals
-- **Approval Rates** - Monitor approval/denial ratios
-- **Performance Metrics** - Approval latency and throughput
-- **Error Tracking** - Failed approvals and system errors
-
-**Historical Analysis:**
-- **Trend Analysis** - Approval patterns over time
-- **Risk Assessment** - Identify high-risk action patterns
-- **Compliance Reporting** - Generate audit reports
-- **Performance Optimization** - Identify bottlenecks
-
-### Alerting System
-
-**Alert Types:**
-- **High-Risk Actions** - Actions requiring immediate attention
-- **Approval Failures** - System errors in approval process
-- **Rate Limit Violations** - Excessive action attempts
-- **Security Incidents** - Potential malicious activity
-
-**Alert Channels:**
-- **Email Notifications** - Critical alerts to administrators
-- **Slack Integration** - Real-time team notifications
-- **Dashboard Alerts** - Visual indicators in monitoring UI
-- **API Webhooks** - Integration with external systems
-
-## Security Features
-
-### Audit Trail
-
-Complete audit trail of all actions:
-
-```python
-class ActionLog:
- def log_action(self, action: Action, result: ApprovalResult):
- entry = {
- "timestamp": datetime.now(),
- "action": action.serialize(),
- "approval": result.approved,
- "reason": result.reason,
- "supervisor": result.supervisor_id,
- "context": action.context
- }
- self.store(entry)
-```
-
-**Audit Features:**
-- **Immutable Logs** - Cannot be modified after creation
-- **Cryptographic Signatures** - Verify log integrity
-- **Retention Policies** - Automatic log archival and cleanup
-- **Export Capabilities** - Generate compliance reports
-
-### Access Control
-
-**Permission System:**
-- **Role-Based Access** - Different permissions per role
-- **Action-Level Permissions** - Granular control over specific actions
-- **Time-Based Permissions** - Temporary elevated access
-- **Multi-Factor Authentication** - Additional security for sensitive actions
-
-### Threat Detection
-
-**Anomaly Detection:**
-- **Behavioral Analysis** - Detect unusual action patterns
-- **Rate Limiting** - Prevent abuse and DoS attacks
-- **Signature Detection** - Identify known attack patterns
-- **Machine Learning** - Adaptive threat detection
-
-## Configuration Examples
-
-### Basic Configuration
-
-```yaml
-hypervisor:
- supervisor: "rule_based"
- rules_file: "config/approval_rules.json"
- audit_log: "logs/actions.log"
-
-approval_settings:
- default_timeout: 30
- max_pending_actions: 100
- require_confirmation: ["high_risk", "financial"]
-```
-
-### Advanced Configuration
-
-```yaml
-hypervisor:
- supervisor: "hybrid"
- llm_model: "gpt-5"
- confidence_threshold: 0.8
-
-risk_categories:
- high_risk:
- - "github.merge_pr"
- - "treasury.transfer_funds"
- - "system.restart_service"
-
- medium_risk:
- - "twitter.post_tweet"
- - "github.create_issue"
-
-monitoring:
- alerts:
- - type: "high_risk_action"
- threshold: 1
- channels: ["email", "slack"]
- - type: "approval_failure_rate"
- threshold: 0.1
- window: "1h"
-```
-
-The Hypervisor system ensures that Talos can operate autonomously while maintaining the highest levels of security and governance oversight.
diff --git a/docs/architecture/skills-services.md b/docs/architecture/skills-services.md
deleted file mode 100644
index 497b3cd2..00000000
--- a/docs/architecture/skills-services.md
+++ /dev/null
@@ -1,306 +0,0 @@
-# Skills & Services
-
-Talos uses a modular architecture with Skills and Services that provide specialized capabilities for different aspects of protocol management.
-
-## Architecture Overview
-
-### Skills vs Services
-
-**Skills** are modular capabilities that can be directly invoked by users or other system components:
-- User-facing functionality
-- Direct query handling
-- Standardized input/output interface
-- Can be combined for complex workflows
-
-**Services** are business logic implementations that provide domain-specific functionality:
-- Backend processing logic
-- Integration with external systems
-- Can be used by multiple skills
-- Focus on specific business domains
-
-## Skills System
-
-### Base Skill Interface
-
-All skills inherit from the abstract `Skill` base class:
-
-```python
-class Skill:
- def run(self, **kwargs) -> QueryResponse:
- """Execute the skill with provided parameters"""
- pass
-
- def create_ticket_tool(self) -> BaseTool:
- """Create a tool for ticket-based execution"""
- pass
-```
-
-**Standard Interface:**
-- **run()** method for direct execution
-- **QueryResponse** return type for consistent output
-- **create_ticket_tool()** for tool integration
-- Parameter validation and type checking
-
-### Available Skills
-
-#### ProposalsSkill
-
-Evaluates governance proposals using LLM analysis:
-
-**Capabilities:**
-- **Proposal Analysis** - Detailed evaluation of governance proposals
-- **Risk Assessment** - Identify potential risks and benefits
-- **Community Impact** - Assess impact on community and stakeholders
-- **Recommendation Generation** - Provide clear approve/reject recommendations
-
-**Usage:**
-```python
-skill = ProposalsSkill()
-result = skill.run(proposal_text="Increase staking rewards by 10%")
-```
-
-**Output:**
-- Detailed analysis report
-- Risk/benefit assessment
-- Community impact evaluation
-- Clear recommendation with reasoning
-
-#### TwitterSentimentSkill
-
-Analyzes Twitter sentiment for given queries:
-
-**Capabilities:**
-- **Tweet Collection** - Gather relevant tweets for analysis
-- **Sentiment Analysis** - Evaluate positive/negative sentiment
-- **Trend Detection** - Identify emerging trends and topics
-- **Influence Scoring** - Assess account influence and credibility
-
-**Usage:**
-```python
-skill = TwitterSentimentSkill()
-result = skill.run(query="DeFi yield farming", limit=100)
-```
-
-**Output:**
-- Sentiment scores and distribution
-- Key themes and topics
-- Influential accounts and tweets
-- Trend analysis and insights
-
-#### TwitterInfluencerSkill
-
-Evaluates Twitter accounts for crypto influence:
-
-**Capabilities:**
-- **Account Analysis** - Comprehensive account evaluation
-- **Influence Scoring** - Multi-metric influence assessment
-- **Content Analysis** - Evaluate tweet quality and relevance
-- **Network Analysis** - Assess follower quality and engagement
-
-**Metrics:**
-- Follower count and growth
-- Engagement rates
-- Content quality scores
-- Network influence metrics
-
-#### CryptographySkill
-
-Provides encryption and decryption operations:
-
-**Capabilities:**
-- **Key Generation** - RSA key pair generation
-- **Encryption/Decryption** - Secure message handling
-- **Digital Signatures** - Message signing and verification
-- **Key Management** - Secure key storage and retrieval
-
-**Security Features:**
-- Industry-standard encryption algorithms
-- Secure key storage
-- Audit trail for all operations
-- Integration with hardware security modules
-
-#### ExecutionPlannerSkill
-
-Generates execution plans for complex tasks:
-
-**Capabilities:**
-- **Task Decomposition** - Break complex tasks into steps
-- **Dependency Analysis** - Identify task dependencies
-- **Resource Planning** - Estimate required resources
-- **Timeline Generation** - Create realistic execution timelines
-
-**Use Cases:**
-- Protocol upgrade planning
-- Treasury rebalancing strategies
-- Community engagement campaigns
-- Development roadmap planning
-
-## Services System
-
-### Base Service Interface
-
-Services implement the abstract `Service` interface:
-
-```python
-class Service:
- def process(self, request: ServiceRequest) -> ServiceResponse:
- """Process a service request"""
- pass
-```
-
-### Available Services
-
-#### YieldManagerService
-
-Calculates optimal staking APR using market data and sentiment:
-
-**Inputs:**
-- Current market conditions
-- Protocol metrics (TVL, utilization)
-- Community sentiment data
-- Competitor analysis
-
-**Processing:**
-- **Market Analysis** - Evaluate current DeFi landscape
-- **Risk Assessment** - Assess protocol-specific risks
-- **Sentiment Integration** - Factor in community sentiment
-- **Optimization** - Calculate optimal APR using LLM reasoning
-
-**Output:**
-- Recommended APR with reasoning
-- Risk assessment and mitigation strategies
-- Market positioning analysis
-- Implementation timeline
-
-#### TalosSentimentService
-
-Orchestrates comprehensive sentiment analysis:
-
-**Workflow:**
-1. **Data Collection** - Gather data from multiple sources
-2. **Preprocessing** - Clean and normalize data
-3. **Analysis** - Apply sentiment analysis algorithms
-4. **Aggregation** - Combine results from different sources
-5. **Reporting** - Generate actionable insights
-
-**Data Sources:**
-- Twitter and social media
-- Discord and Telegram communities
-- Reddit discussions
-- News articles and blogs
-
-#### GithubService
-
-Handles GitHub operations and PR reviews:
-
-**Capabilities:**
-- **Repository Management** - Clone, fork, branch operations
-- **Pull Request Reviews** - Automated code review and scoring
-- **Issue Management** - Create, update, and track issues
-- **Workflow Automation** - CI/CD integration and automation
-
-**Review Process:**
-1. **Code Analysis** - Static analysis and quality checks
-2. **Security Scanning** - Vulnerability detection
-3. **Style Validation** - Code style and convention checks
-4. **Test Coverage** - Ensure adequate test coverage
-5. **Documentation** - Verify documentation updates
-
-## Skill and Service Management
-
-### Direct Management
-
-The `MainAgent` directly manages skills and services without a separate Router:
-
-```python
-class MainAgent:
- def __init__(self):
- self.skills = []
- self.services = []
- self.keyword_mapping = {}
-
- def route(self, query: str) -> Union[Skill, Service]:
- """Route query to appropriate handler"""
- pass
-```
-
-**Routing Logic:**
-- **Keyword Matching** - Match query keywords to skills/services
-- **Intent Recognition** - Understand user intent from query
-- **Context Awareness** - Consider conversation history
-- **Fallback Handling** - Default routing for unmatched queries
-
-### Registration System
-
-Skills and services are dynamically registered:
-
-```python
-# Skills and services are automatically registered during MainAgent initialization
-# based on available API keys and configuration
-```
-
-## Integration Patterns
-
-### Skill Composition
-
-Skills can be combined for complex workflows:
-
-```python
-class ComplexWorkflowSkill(Skill):
- def __init__(self):
- self.sentiment_skill = TwitterSentimentSkill()
- self.yield_service = YieldManagerService()
-
- def run(self, **kwargs):
- # Get sentiment data
- sentiment = self.sentiment_skill.run(query="protocol sentiment")
-
- # Calculate optimal yield
- yield_data = self.yield_service.process(sentiment_data=sentiment)
-
- return QueryResponse(answers=[yield_data])
-```
-
-### Service Orchestration
-
-Services can orchestrate multiple operations:
-
-```python
-class ProtocolManagementService(Service):
- def process(self, request):
- # Analyze market conditions
- market_data = self.market_service.get_conditions()
-
- # Get community sentiment
- sentiment = self.sentiment_service.analyze()
-
- # Calculate optimal parameters
- params = self.optimization_service.optimize(market_data, sentiment)
-
- return ServiceResponse(recommendations=params)
-```
-
-## Best Practices
-
-### Skill Development
-
-- **Single Responsibility** - Each skill should have one clear purpose
-- **Consistent Interface** - Follow the standard Skill interface
-- **Error Handling** - Robust error handling and user feedback
-- **Documentation** - Clear documentation of inputs and outputs
-
-### Service Design
-
-- **Stateless Operations** - Services should be stateless when possible
-- **Idempotent Operations** - Operations should be safely repeatable
-- **Resource Management** - Proper cleanup of external resources
-- **Monitoring** - Comprehensive logging and metrics
-
-### Integration Guidelines
-
-- **Loose Coupling** - Minimize dependencies between components
-- **Standard Interfaces** - Use consistent data formats and APIs
-- **Error Propagation** - Proper error handling across component boundaries
-- **Testing** - Comprehensive unit and integration testing
-
-This modular architecture enables Talos to provide sophisticated protocol management capabilities while maintaining flexibility and extensibility for future enhancements.
diff --git a/docs/cli/arbiscan.md b/docs/cli/arbiscan.md
deleted file mode 100644
index d61fffa2..00000000
--- a/docs/cli/arbiscan.md
+++ /dev/null
@@ -1,164 +0,0 @@
-# Arbiscan CLI
-
-The Arbiscan CLI module provides commands for retrieving smart contract source code from Arbitrum blockchain networks.
-
-## Commands
-
-### `get-source-code` - Retrieve Contract Source Code
-
-Gets the source code of a verified smart contract from Arbiscan.
-
-```bash
-uv run talos arbiscan get-source-code [options]
-```
-
-**Arguments:**
-- `contract_address`: The contract address to get source code for (required)
-
-**Options:**
-- `--api-key, -k`: Optional API key for higher rate limits
-- `--chain-id, -c`: Chain ID (default: 42161 for Arbitrum One)
-- `--format, -f`: Output format - 'formatted', 'json', or 'source-only' (default: 'formatted')
-
-## Supported Networks
-
-| Chain ID | Network | Description |
-|----------|---------|-------------|
-| 42161 | Arbitrum One | Main Arbitrum network |
-| 42170 | Arbitrum Nova | Arbitrum Nova network |
-| 421614 | Arbitrum Sepolia | Arbitrum testnet |
-
-## Usage Examples
-
-### Basic Usage
-
-```bash
-# Get source code for a contract on Arbitrum One
-uv run talos arbiscan get-source-code 0x1234567890abcdef1234567890abcdef12345678
-
-# Get source code with API key for higher rate limits
-uv run talos arbiscan get-source-code 0x1234567890abcdef1234567890abcdef12345678 --api-key your_api_key
-```
-
-### Different Networks
-
-```bash
-# Get source code from Arbitrum Nova
-uv run talos arbiscan get-source-code 0x1234567890abcdef1234567890abcdef12345678 --chain-id 42170
-
-# Get source code from Arbitrum Sepolia testnet
-uv run talos arbiscan get-source-code 0x1234567890abcdef1234567890abcdef12345678 --chain-id 421614
-```
-
-### Output Formats
-
-```bash
-# Formatted output (default) - human-readable with contract details
-uv run talos arbiscan get-source-code 0x1234567890abcdef1234567890abcdef12345678 --format formatted
-
-# JSON output - structured data format
-uv run talos arbiscan get-source-code 0x1234567890abcdef1234567890abcdef12345678 --format json
-
-# Source code only - just the contract source code
-uv run talos arbiscan get-source-code 0x1234567890abcdef1234567890abcdef12345678 --format source-only
-```
-
-## Output Information
-
-### Formatted Output
-
-The formatted output includes:
-- **Contract Name**: Name of the smart contract
-- **Compiler Version**: Solidity compiler version used
-- **Optimization Used**: Whether compiler optimization was enabled
-- **Optimization Runs**: Number of optimization runs (if enabled)
-- **License Type**: Contract license information
-- **Proxy Implementation**: Implementation address (for proxy contracts)
-- **Source Code**: Complete contract source code
-
-### JSON Output
-
-The JSON output provides structured data with all contract information in a machine-readable format.
-
-### Source-Only Output
-
-Returns only the contract source code without additional metadata.
-
-## API Key Setup
-
-### Environment Variable
-
-Set your Arbiscan API key as an environment variable:
-
-```bash
-export ARBISCAN_API_KEY=your_api_key_here
-```
-
-### Command Line Option
-
-Alternatively, provide the API key directly:
-
-```bash
-uv run talos arbiscan get-source-code 0x1234... --api-key your_api_key_here
-```
-
-### Getting an API Key
-
-1. Visit [https://arbiscan.io/apis](https://arbiscan.io/apis)
-2. Create a free account
-3. Generate an API key
-4. Use the key for higher rate limits and better reliability
-
-## Rate Limits
-
-- **Without API Key**: Limited requests per minute
-- **With API Key**: Higher rate limits and better reliability
-- **Free Tier**: Sufficient for most use cases
-- **Paid Tiers**: Available for high-volume usage
-
-## Error Handling
-
-The command includes comprehensive error handling for:
-
-### API Errors
-- Missing or invalid API key
-- Rate limit exceeded
-- Invalid contract address
-- Contract not verified
-- Network connectivity issues
-
-### Input Validation
-- Invalid contract address format
-- Unsupported chain ID
-- Invalid output format
-
-### Example Error Messages
-
-```bash
-# Missing API key error
-Error: Arbiscan API key is required to get contract source code.
-Please provide an API key using the --api-key option.
-You can get a free API key from https://arbiscan.io/apis
-
-# Invalid contract address
-Error: Invalid contract address format
-
-# Contract not verified
-Error: Contract source code not verified on Arbiscan
-```
-
-## Integration
-
-The Arbiscan CLI integrates with:
-- Smart contract analysis workflows
-- Security audit processes
-- Development and debugging tools
-- Automated contract verification systems
-
-## Use Cases
-
-- **Security Analysis**: Review contract source code for vulnerabilities
-- **Development**: Study implementation patterns and best practices
-- **Auditing**: Verify contract functionality and security
-- **Research**: Analyze DeFi protocols and smart contract architectures
-- **Integration**: Retrieve contract ABIs and interfaces for development
diff --git a/docs/cli/crypto.md b/docs/cli/crypto.md
deleted file mode 100644
index b263a443..00000000
--- a/docs/cli/crypto.md
+++ /dev/null
@@ -1,419 +0,0 @@
-# Cryptography Commands
-
-The Talos CLI provides cryptographic operations for secure key management, encryption, and decryption using industry-standard RSA encryption.
-
-## Overview
-
-The cryptography commands enable:
-- RSA key pair generation
-- Public key retrieval and sharing
-- Data encryption using public keys
-- Data decryption using private keys
-- Secure key storage and management
-
-## Commands
-
-### `generate-keys` - Generate RSA Key Pair
-
-Generate a new RSA key pair for encryption and decryption operations.
-
-**Usage:**
-```bash
-uv run talos generate-keys
-```
-
-**Options:**
-- `--key-size`: RSA key size in bits (default: 2048, options: 1024, 2048, 4096)
-- `--output-dir`: Directory to store keys (default: `.keys/`)
-- `--overwrite`: Overwrite existing keys if they exist
-
-**Examples:**
-```bash
-# Generate default 2048-bit keys
-uv run talos generate-keys
-
-# Generate high-security 4096-bit keys
-uv run talos generate-keys --key-size 4096
-
-# Generate keys in custom directory
-uv run talos generate-keys --output-dir /secure/keys/
-
-# Overwrite existing keys
-uv run talos generate-keys --overwrite
-```
-
-**Output:**
-```
-=== RSA Key Generation ===
-
-Key Size: 2048 bits
-Output Directory: .keys/
-
-✅ Private key generated: .keys/private_key.pem
-✅ Public key generated: .keys/public_key.pem
-
-Key fingerprint: SHA256:abc123def456...
-
-⚠️ Security Notice:
-- Keep your private key secure and never share it
-- The public key can be safely shared with others
-- Back up your keys in a secure location
-```
-
-**Generated Files:**
-- `private_key.pem` - Private key (keep secure)
-- `public_key.pem` - Public key (can be shared)
-
-### `get-public-key` - Retrieve Public Key
-
-Display the current public key for sharing with others.
-
-**Usage:**
-```bash
-uv run talos get-public-key
-```
-
-**Options:**
-- `--format`: Output format (pem, der, base64)
-- `--key-dir`: Directory containing keys (default: `.keys/`)
-- `--fingerprint`: Show key fingerprint
-
-**Examples:**
-```bash
-# Display public key in PEM format
-uv run talos get-public-key
-
-# Show key with fingerprint
-uv run talos get-public-key --fingerprint
-
-# Export in base64 format
-uv run talos get-public-key --format base64
-
-# Use keys from custom directory
-uv run talos get-public-key --key-dir /secure/keys/
-```
-
-**Output:**
-```
-=== Public Key ===
-
------BEGIN PUBLIC KEY-----
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1234567890abcdef...
-...
------END PUBLIC KEY-----
-
-Fingerprint: SHA256:abc123def456789...
-Key Size: 2048 bits
-Created: 2024-01-15 10:30:00 UTC
-```
-
-### `encrypt` - Encrypt Data
-
-Encrypt data using a public key (yours or someone else's).
-
-**Usage:**
-```bash
-uv run talos encrypt ""
-```
-
-**Arguments:**
-- `data`: Text data to encrypt
-- `public_key_file`: Path to public key file
-
-**Options:**
-- `--output`: Output file for encrypted data
-- `--format`: Output format (base64, hex, binary)
-
-**Examples:**
-```bash
-# Encrypt a message using your own public key
-uv run talos encrypt "Secret message" .keys/public_key.pem
-
-# Encrypt using someone else's public key
-uv run talos encrypt "Confidential data" /path/to/their/public_key.pem
-
-# Save encrypted data to file
-uv run talos encrypt "Important info" public_key.pem --output encrypted.txt
-
-# Output in hex format
-uv run talos encrypt "Data" public_key.pem --format hex
-```
-
-**Output:**
-```
-=== Encryption Complete ===
-
-Original Data: "Secret message"
-Public Key: .keys/public_key.pem
-Encrypted Data (Base64):
-gAAAAABhZ1234567890abcdef...
-
-✅ Data encrypted successfully
-📋 Copy the encrypted data above to share securely
-```
-
-### `decrypt` - Decrypt Data
-
-Decrypt data using your private key.
-
-**Usage:**
-```bash
-uv run talos decrypt ""
-```
-
-**Arguments:**
-- `encrypted_data`: Base64-encoded encrypted data
-
-**Options:**
-- `--key-dir`: Directory containing private key (default: `.keys/`)
-- `--input-file`: Read encrypted data from file
-- `--format`: Input format (base64, hex, binary)
-
-**Examples:**
-```bash
-# Decrypt base64-encoded data
-uv run talos decrypt "gAAAAABhZ1234567890abcdef..."
-
-# Decrypt from file
-uv run talos decrypt --input-file encrypted.txt
-
-# Decrypt hex-encoded data
-uv run talos decrypt "48656c6c6f20576f726c64" --format hex
-
-# Use private key from custom directory
-uv run talos decrypt "encrypted_data" --key-dir /secure/keys/
-```
-
-**Output:**
-```
-=== Decryption Complete ===
-
-Encrypted Data: gAAAAABhZ1234567890abcdef...
-Private Key: .keys/private_key.pem
-
-✅ Decryption successful
-Decrypted Data: "Secret message"
-```
-
-## Security Features
-
-### Key Storage
-
-**Default Location:**
-- Keys stored in `.keys/` directory
-- Private key permissions set to 600 (owner read/write only)
-- Public key permissions set to 644 (world readable)
-
-**Security Measures:**
-- Private keys never transmitted or logged
-- Secure random number generation
-- Industry-standard RSA implementation
-- Automatic permission setting
-
-### Encryption Standards
-
-**RSA Configuration:**
-- PKCS#1 OAEP padding
-- SHA-256 hash function
-- MGF1 mask generation
-- Secure random padding
-
-**Key Sizes:**
-- 1024-bit: Legacy support (not recommended)
-- 2048-bit: Standard security (recommended)
-- 4096-bit: High security (slower performance)
-
-## Advanced Usage
-
-### Secure Communication Workflow
-
-**Setup (one time):**
-```bash
-# Generate your key pair
-uv run talos generate-keys --key-size 2048
-
-# Share your public key
-uv run talos get-public-key > my_public_key.pem
-```
-
-**Sending encrypted messages:**
-```bash
-# Encrypt message for recipient
-uv run talos encrypt "Confidential message" recipient_public_key.pem
-
-# Send the encrypted output to recipient
-```
-
-**Receiving encrypted messages:**
-```bash
-# Decrypt received message
-uv run talos decrypt "received_encrypted_data"
-```
-
-### Batch Operations
-
-**Encrypt multiple files:**
-```bash
-#!/bin/bash
-# encrypt-files.sh
-
-public_key="recipient_public_key.pem"
-
-for file in *.txt; do
- echo "Encrypting $file..."
- content=$(cat "$file")
- encrypted=$(uv run talos encrypt "$content" "$public_key")
- echo "$encrypted" > "$file.encrypted"
-done
-```
-
-**Decrypt multiple messages:**
-```bash
-#!/bin/bash
-# decrypt-messages.sh
-
-for encrypted_file in *.encrypted; do
- echo "Decrypting $encrypted_file..."
- encrypted_data=$(cat "$encrypted_file")
- decrypted=$(uv run talos decrypt "$encrypted_data")
- echo "$decrypted" > "${encrypted_file%.encrypted}.decrypted"
-done
-```
-
-### Integration with Other Commands
-
-**Secure GitHub token storage:**
-```bash
-# Encrypt your GitHub token
-encrypted_token=$(uv run talos encrypt "$GITHUB_API_TOKEN" public_key.pem)
-
-# Store encrypted token safely
-echo "$encrypted_token" > github_token.encrypted
-
-# Later, decrypt when needed
-GITHUB_API_TOKEN=$(uv run talos decrypt "$(cat github_token.encrypted)")
-```
-
-## Configuration
-
-### Key Management Settings
-
-```yaml
-cryptography:
- key_storage:
- directory: ".keys"
- private_key_permissions: "600"
- public_key_permissions: "644"
- backup_enabled: true
- backup_directory: ".keys/backup"
-
- encryption:
- default_key_size: 2048
- padding: "OAEP"
- hash_algorithm: "SHA256"
- mgf: "MGF1"
-
- security:
- secure_delete: true
- audit_operations: true
- require_confirmation: ["generate-keys --overwrite"]
-```
-
-### Backup Configuration
-
-```yaml
-cryptography:
- backup:
- enabled: true
- schedule: "daily"
- retention_days: 30
- encryption: true
- remote_backup:
- enabled: false
- provider: "s3"
- bucket: "secure-key-backup"
-```
-
-## Error Handling
-
-### Common Issues
-
-**Missing Keys:**
-```
-Error: Private key not found at .keys/private_key.pem
-Solution: Run 'uv run talos generate-keys' to create keys
-```
-
-**Invalid Encrypted Data:**
-```
-Error: Failed to decrypt data - invalid format
-Solution: Verify encrypted data is complete and in correct format
-```
-
-**Permission Denied:**
-```
-Error: Permission denied accessing private key
-Solution: Check file permissions or run with appropriate privileges
-```
-
-**Key Size Mismatch:**
-```
-Error: Data too large for key size
-Solution: Use larger key size or encrypt smaller data chunks
-```
-
-### Security Warnings
-
-**Weak Key Size:**
-```
-Warning: 1024-bit keys are not recommended for new applications
-Recommendation: Use 2048-bit or larger keys
-```
-
-**Insecure Storage:**
-```
-Warning: Private key has insecure permissions
-Action: Automatically fixing permissions to 600
-```
-
-## Best Practices
-
-### Key Management
-
-**Generation:**
-- Use 2048-bit keys minimum for new applications
-- Generate keys on secure, trusted systems
-- Use hardware security modules for high-value keys
-
-**Storage:**
-- Keep private keys secure and never share them
-- Back up keys in multiple secure locations
-- Use encrypted storage for key backups
-- Regularly rotate keys for long-term use
-
-**Distribution:**
-- Public keys can be shared freely
-- Verify public key authenticity through secure channels
-- Use key fingerprints to verify key integrity
-
-### Operational Security
-
-**Data Handling:**
-- Encrypt sensitive data before storage or transmission
-- Use secure channels for sharing encrypted data
-- Verify decryption results before acting on them
-- Clear sensitive data from memory after use
-
-**Access Control:**
-- Limit access to private keys
-- Use principle of least privilege
-- Monitor key usage and access
-- Implement key escrow for critical applications
-
-**Compliance:**
-- Follow organizational security policies
-- Meet regulatory requirements for data protection
-- Document key management procedures
-- Regular security audits and reviews
-
-The cryptography commands provide enterprise-grade security for protecting sensitive data and communications within the Talos ecosystem.
diff --git a/docs/cli/github.md b/docs/cli/github.md
deleted file mode 100644
index de69d87d..00000000
--- a/docs/cli/github.md
+++ /dev/null
@@ -1,379 +0,0 @@
-# GitHub Commands
-
-The Talos CLI includes comprehensive GitHub integration for repository management, pull request reviews, and development workflow automation.
-
-## Setup
-
-### Authentication
-
-Set your GitHub API token as an environment variable:
-
-```bash
-export GITHUB_API_TOKEN=your_github_token_here
-```
-
-### Repository Configuration
-
-Specify the target repository in two ways:
-
-1. **Environment variable** (recommended for repeated use):
- ```bash
- export GITHUB_REPO=owner/repo
- uv run talos github get-prs
- ```
-
-2. **Command line argument**:
- ```bash
- uv run talos github get-prs --repo owner/repo
- ```
-
-## Commands
-
-### `get-prs` - List Pull Requests
-
-List pull requests for a repository with filtering options.
-
-**Basic Usage:**
-```bash
-# List open PRs (default)
-uv run talos github get-prs --repo microsoft/vscode
-
-# Using environment variable
-export GITHUB_REPO=microsoft/vscode
-uv run talos github get-prs
-```
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-- `--state`: PR state - 'open' (default), 'closed', or 'all'
-
-**Examples:**
-```bash
-# List all PRs (open, closed, merged)
-uv run talos github get-prs --repo microsoft/vscode --state all
-
-# List closed PRs only
-uv run talos github get-prs --repo microsoft/vscode --state closed
-```
-
-**Output Format:**
-```
-PR #123: Fix memory leak in parser
- Author: developer@example.com
- State: open
- Created: 2024-01-15
- Updated: 2024-01-16
-
-PR #122: Add new API endpoint
- Author: contributor@example.com
- State: merged
- Created: 2024-01-14
- Merged: 2024-01-15
-```
-
-### `review-pr` - AI-Powered PR Review
-
-Perform comprehensive AI analysis of pull requests with security and quality scoring.
-
-**Basic Usage:**
-```bash
-# Review a PR (display results only)
-uv run talos github review-pr 123 --repo microsoft/vscode
-
-# Review and post the review as a comment on GitHub
-uv run talos github review-pr 123 --repo microsoft/vscode --post
-```
-
-**Arguments:**
-- `pr_number`: Pull request number to review (required)
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-- `--post`: Post the review as a comment on the PR
-- `--auto-approve`: Automatically approve if criteria are met
-
-**Advanced Usage:**
-```bash
-# Review with auto-approval if criteria are met
-uv run talos github review-pr 123 --repo microsoft/vscode --auto-approve
-
-# Review multiple PRs
-for pr in 123 124 125; do
- uv run talos github review-pr $pr --repo microsoft/vscode --post
-done
-```
-
-**Review Output:**
-
-The review includes comprehensive analysis:
-
-```
-=== PR Review Analysis ===
-
-Security Score: 85/100
-Quality Score: 92/100
-Recommendation: APPROVE
-
-=== Security Analysis ===
-✅ No hardcoded secrets detected
-✅ Input validation present
-⚠️ Consider adding rate limiting to new API endpoint
-✅ Authentication checks in place
-
-=== Quality Analysis ===
-✅ Code follows project style guidelines
-✅ Adequate test coverage (87%)
-✅ Documentation updated
-⚠️ Consider adding error handling for edge case
-
-=== Detailed Findings ===
-1. New API endpoint properly validates input parameters
-2. Tests cover main functionality but missing edge case tests
-3. Documentation clearly explains new features
-4. No breaking changes detected
-
-=== Recommendations ===
-- Add rate limiting to prevent abuse
-- Include tests for malformed input handling
-- Consider adding metrics collection
-
-Overall: This PR introduces valuable functionality with good security practices.
-Minor improvements suggested but safe to merge.
-```
-
-### `approve-pr` - Force Approve PR
-
-Approve a pull request without AI analysis (use with caution).
-
-**Usage:**
-```bash
-uv run talos github approve-pr 123 --repo microsoft/vscode
-```
-
-**Arguments:**
-- `pr_number`: Pull request number to approve (required)
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-
-**When to Use:**
-- Emergency fixes that need immediate approval
-- PRs that have been manually reviewed
-- Trusted contributors with pre-approved changes
-
-### `merge-pr` - Merge Pull Request
-
-Merge an approved pull request.
-
-**Usage:**
-```bash
-uv run talos github merge-pr 123 --repo microsoft/vscode
-```
-
-**Arguments:**
-- `pr_number`: Pull request number to merge (required)
-
-**Options:**
-- `--repo, -r`: Repository in format 'owner/repo'
-
-**Prerequisites:**
-- PR must be approved
-- All required checks must pass
-- No merge conflicts
-- Sufficient permissions
-
-## Workflow Examples
-
-### Daily PR Review Workflow
-
-```bash
-#!/bin/bash
-# daily-review.sh
-
-export GITHUB_REPO=myorg/myproject
-
-echo "=== Daily PR Review ==="
-
-# List all open PRs
-echo "Open PRs:"
-uv run talos github get-prs
-
-# Review each open PR
-for pr in $(uv run talos github get-prs --format=numbers); do
- echo "Reviewing PR #$pr..."
- uv run talos github review-pr $pr --post
-done
-
-echo "Review complete!"
-```
-
-### Automated Security Review
-
-```bash
-#!/bin/bash
-# security-review.sh
-
-export GITHUB_REPO=myorg/sensitive-project
-
-# Get PRs from external contributors
-external_prs=$(uv run talos github get-prs --external-only)
-
-for pr in $external_prs; do
- echo "Security review for external PR #$pr"
-
- # Perform detailed review without auto-approval
- uv run talos github review-pr $pr --security-focus --post
-
- # Only approve if security score > 90
- score=$(uv run talos github review-pr $pr --get-security-score)
- if [ $score -gt 90 ]; then
- uv run talos github approve-pr $pr
- echo "PR #$pr approved (security score: $score)"
- else
- echo "PR #$pr requires manual review (security score: $score)"
- fi
-done
-```
-
-### Release Preparation
-
-```bash
-#!/bin/bash
-# release-prep.sh
-
-export GITHUB_REPO=myorg/myproject
-
-echo "=== Release Preparation ==="
-
-# Review all PRs targeted for release
-release_prs=$(uv run talos github get-prs --label="release-candidate")
-
-for pr in $release_prs; do
- echo "Final review for release PR #$pr"
-
- # Comprehensive review with strict criteria
- uv run talos github review-pr $pr --strict-mode --post
-
- # Auto-approve only high-quality PRs
- uv run talos github review-pr $pr --auto-approve --min-quality=95
-done
-
-echo "Release review complete!"
-```
-
-## Configuration
-
-### Review Criteria
-
-Configure review criteria in your Talos configuration:
-
-```yaml
-github:
- review:
- security:
- min_score: 80
- required_checks:
- - "no_hardcoded_secrets"
- - "input_validation"
- - "authentication"
-
- quality:
- min_score: 85
- required_checks:
- - "test_coverage"
- - "documentation"
- - "style_compliance"
-
- auto_approve:
- enabled: true
- min_security_score: 90
- min_quality_score: 90
- trusted_authors:
- - "senior-dev@company.com"
- - "security-team@company.com"
-```
-
-### Notification Settings
-
-```yaml
-github:
- notifications:
- slack_webhook: "https://hooks.slack.com/..."
- email_alerts: true
-
- triggers:
- - "security_score_low"
- - "quality_score_low"
- - "external_contributor"
- - "large_pr"
-```
-
-## Error Handling
-
-The GitHub commands include comprehensive error handling:
-
-### Common Errors
-
-**Missing Repository:**
-```
-Error: Repository not specified
-Solution: Set GITHUB_REPO environment variable or use --repo flag
-```
-
-**Invalid Token:**
-```
-Error: GitHub API authentication failed
-Solution: Check GITHUB_API_TOKEN environment variable
-```
-
-**PR Not Found:**
-```
-Error: Pull request #123 not found
-Solution: Verify PR number and repository access
-```
-
-**Insufficient Permissions:**
-```
-Error: Insufficient permissions to approve PR
-Solution: Check repository permissions for your GitHub token
-```
-
-### Rate Limiting
-
-GitHub API has rate limits. Talos handles this automatically:
-
-- **Automatic Backoff** - Waits when rate limit is reached
-- **Batch Operations** - Optimizes API calls for efficiency
-- **Progress Updates** - Shows progress for long-running operations
-
-### Network Issues
-
-**Retry Logic:**
-- Automatic retry for transient network errors
-- Exponential backoff for repeated failures
-- Clear error messages for permanent failures
-
-## Best Practices
-
-### Security
-
-- **Token Security** - Store GitHub tokens securely
-- **Permission Scope** - Use minimal required permissions
-- **Review External PRs** - Always review PRs from external contributors
-- **Audit Logs** - Monitor all GitHub operations
-
-### Efficiency
-
-- **Batch Reviews** - Review multiple PRs in scripts
-- **Environment Variables** - Use GITHUB_REPO for repeated operations
-- **Filtering** - Use state and label filters to focus on relevant PRs
-- **Automation** - Integrate with CI/CD pipelines
-
-### Quality Assurance
-
-- **Consistent Reviews** - Use standardized review criteria
-- **Documentation** - Ensure all reviews are documented
-- **Follow-up** - Track and follow up on review recommendations
-- **Continuous Improvement** - Regularly update review criteria
-
-The GitHub integration provides powerful tools for maintaining code quality and security while automating routine development workflows.
diff --git a/docs/cli/interactive.md b/docs/cli/interactive.md
deleted file mode 100644
index 4dd47bba..00000000
--- a/docs/cli/interactive.md
+++ /dev/null
@@ -1,279 +0,0 @@
-# Interactive Mode
-
-Interactive mode provides a conversational interface for working with Talos, allowing for natural language queries and continuous dialogue.
-
-## Starting Interactive Mode
-
-Launch interactive mode by running Talos without arguments:
-
-```bash
-uv run talos
-```
-
-You'll see a prompt where you can start conversing:
-
-```
-Talos AI Agent - Interactive Mode
-Type 'exit' to quit
-
->>
-```
-
-## Basic Usage
-
-### Simple Queries
-
-Ask questions in natural language:
-
-```
->> What are your main capabilities?
->> How is the current market sentiment?
->> What governance proposals need review?
-```
-
-### Complex Requests
-
-Request detailed analysis and recommendations:
-
-```
->> Analyze the sentiment around "yield farming" on Twitter and recommend APR adjustments
->> Review the latest GitHub PRs and identify any security concerns
->> Evaluate the community response to our latest protocol update
-```
-
-### Multi-turn Conversations
-
-Talos maintains context across the conversation:
-
-```
->> Analyze sentiment for "DeFi protocols"
->> What are the main concerns mentioned?
->> How should we address these concerns in our next update?
->> Draft a response strategy
-```
-
-## Available Commands
-
-### Protocol Management
-
-```
->> Check treasury performance
->> Analyze staking metrics
->> Review governance proposals
->> Calculate optimal APR
-```
-
-### Community Engagement
-
-```
->> What's the community saying about our protocol?
->> Analyze Twitter sentiment for "our_protocol_name"
->> Check for mentions and discussions
->> Draft a community update
-```
-
-### Development Oversight
-
-```
->> Review open GitHub PRs
->> Check for security issues in recent commits
->> Analyze code quality metrics
->> Review contributor activity
-```
-
-### Market Analysis
-
-```
->> What are current DeFi trends?
->> Analyze competitor protocols
->> Check yield farming opportunities
->> Review market volatility
-```
-
-## Advanced Features
-
-### Context Awareness
-
-Talos remembers previous conversations and can reference earlier topics:
-
-```
->> Remember our discussion about APR optimization yesterday?
->> Based on our previous analysis, what's changed?
->> Update the recommendations from our last conversation
-```
-
-### Multi-step Workflows
-
-Break complex tasks into steps:
-
-```
->> I need to prepare for the governance vote next week
->> First, analyze community sentiment
->> Then review the proposal details
->> Finally, prepare talking points for the discussion
-```
-
-### Real-time Updates
-
-Get live updates during long-running operations:
-
-```
->> Start monitoring Twitter for protocol mentions
->> Analyze the next 100 tweets about DeFi
->> Keep me updated on any significant sentiment changes
-```
-
-## Conversation Management
-
-### History
-
-Talos maintains conversation history within the session:
-
-- Previous queries and responses are remembered
-- Context is preserved across multiple exchanges
-- You can reference earlier parts of the conversation
-
-### Memory
-
-Important information is stored in persistent memory:
-
-- Key insights and decisions
-- Protocol-specific information
-- User preferences and patterns
-- Historical analysis results
-
-### Session Control
-
-```
->> clear history # Clear current session history
->> save conversation # Save important parts to memory
->> load previous session # Reference previous conversations
-```
-
-## Interactive Commands
-
-### Help and Information
-
-```
->> help # General help
->> what can you do? # Capability overview
->> show available commands # Command reference
->> explain [topic] # Detailed explanations
-```
-
-### Status and Monitoring
-
-```
->> status # System status
->> check connections # API connectivity
->> show recent activity # Recent operations
->> monitor [service] # Real-time monitoring
-```
-
-### Configuration
-
-```
->> show config # Current configuration
->> set preference [key] [value] # Update preferences
->> reset settings # Reset to defaults
-```
-
-## Best Practices
-
-### Effective Communication
-
-**Be Specific**: Provide clear, specific requests
-```
-Good: "Analyze Twitter sentiment for 'yield farming' in the last 24 hours"
-Poor: "Check Twitter"
-```
-
-**Provide Context**: Give relevant background information
-```
-Good: "We're considering increasing APR from 5% to 7%. Analyze community sentiment about yield changes."
-Poor: "Should we change APR?"
-```
-
-**Ask Follow-up Questions**: Dig deeper into analysis
-```
->> What are the main risks identified?
->> How confident are you in this recommendation?
->> What additional data would improve this analysis?
-```
-
-### Workflow Optimization
-
-**Use Natural Language**: Don't worry about exact command syntax
-```
->> "Can you help me understand the latest governance proposal?"
->> "I need to review PRs that might have security issues"
->> "What's the community mood about our recent changes?"
-```
-
-**Combine Operations**: Request multiple related tasks
-```
->> "Analyze market sentiment, check our GitHub activity, and recommend any protocol adjustments"
-```
-
-**Iterate and Refine**: Build on previous responses
-```
->> "That analysis is helpful. Can you focus specifically on the security concerns?"
->> "Based on that sentiment data, what's our best response strategy?"
-```
-
-### Session Management
-
-**Save Important Results**: Preserve key insights
-```
->> "Save this analysis to memory for future reference"
->> "Remember this decision for next week's review"
-```
-
-**Reference Previous Work**: Build on past conversations
-```
->> "Based on last week's sentiment analysis, what's changed?"
->> "Update the recommendations from our previous discussion"
-```
-
-## Troubleshooting
-
-### Common Issues
-
-**No Response**: Check API key configuration
-```
->> status
->> check connections
-```
-
-**Slow Responses**: Large queries may take time
-```
->> "This is taking a while, can you give me a status update?"
-```
-
-**Unclear Results**: Ask for clarification
-```
->> "Can you explain that recommendation in more detail?"
->> "What data did you use for this analysis?"
-```
-
-### Error Recovery
-
-**Connection Issues**: Talos will attempt to reconnect automatically
-```
->> "I see there was a connection issue. Can you retry that analysis?"
-```
-
-**Invalid Requests**: Talos will ask for clarification
-```
->> "I'm not sure what you mean. Can you rephrase that request?"
-```
-
-### Getting Help
-
-```
->> help # General help
->> troubleshoot # Common issues
->> contact support # How to get additional help
-```
-
-Interactive mode provides the most natural and powerful way to work with Talos, enabling sophisticated protocol management through conversational AI.
diff --git a/docs/cli/memory.md b/docs/cli/memory.md
deleted file mode 100644
index 56dd290c..00000000
--- a/docs/cli/memory.md
+++ /dev/null
@@ -1,159 +0,0 @@
-# Memory CLI
-
-The memory CLI module provides commands for managing and searching the agent's persistent memory system.
-
-## Commands
-
-### `list` - List Memories
-
-List all memories with optional user filtering.
-
-```bash
-uv run talos memory list [options]
-```
-
-**Options:**
-- `--user-id, -u`: User ID to filter memories by
-- `--filter-user`: Filter memories by a different user
-- `--use-database`: Use database backend instead of files (default: true)
-- `--verbose, -v`: Enable verbose output
-
-### `search` - Search Memories
-
-Search memories using semantic similarity with optional user filtering.
-
-```bash
-uv run talos memory search [options]
-```
-
-**Arguments:**
-- `query`: Search query for memories (required)
-
-**Options:**
-- `--user-id, -u`: User ID to search memories for
-- `--filter-user`: Filter memories by a different user
-- `--limit, -l`: Maximum number of results to return (default: 5)
-- `--use-database`: Use database backend instead of files (default: true)
-- `--verbose, -v`: Enable verbose output
-
-### `flush` - Flush Memories
-
-Flush unsaved memories to disk or delete user memories from database.
-
-```bash
-uv run talos memory flush [options]
-```
-
-**Options:**
-- `--user-id, -u`: User ID for database backend (if not provided with database backend, flushes ALL memories after confirmation)
-- `--use-database`: Use database backend instead of files (default: true)
-- `--verbose, -v`: Enable verbose output
-
-## Usage Examples
-
-### Listing Memories
-
-```bash
-# List all memories
-uv run talos memory list
-
-# List memories for a specific user
-uv run talos memory list --user-id user123
-
-# List memories with verbose output
-uv run talos memory list --verbose
-
-# Use file-based backend
-uv run talos memory list --no-use-database
-```
-
-### Searching Memories
-
-```bash
-# Basic semantic search
-uv run talos memory search "governance proposal"
-
-# Search with custom limit
-uv run talos memory search "twitter sentiment" --limit 10
-
-# Search for specific user's memories
-uv run talos memory search "market analysis" --user-id user123
-
-# Search with verbose output
-uv run talos memory search "protocol upgrade" --verbose
-```
-
-### Managing Memory Storage
-
-```bash
-# Flush unsaved memories for a specific user
-uv run talos memory flush --user-id user123
-
-# Flush all memories (requires confirmation)
-uv run talos memory flush
-
-# Flush with file-based backend
-uv run talos memory flush --no-use-database
-```
-
-## Memory Backends
-
-### Database Backend (Default)
-
-The database backend provides:
-- Multi-user support with user isolation
-- Persistent storage across sessions
-- Efficient semantic search using vector embeddings
-- User management and cleanup capabilities
-
-### File Backend
-
-The file backend provides:
-- Simple file-based storage
-- Single-user operation
-- Local memory and history files
-- No user isolation
-
-## Memory Structure
-
-Each memory contains:
-- **Description**: Text content of the memory
-- **Timestamp**: When the memory was created
-- **Metadata**: Additional context and tags
-- **Embeddings**: Vector representations for semantic search
-
-## Environment Variables
-
-- `OPENAI_API_KEY`: Required for generating embeddings for semantic search
-
-## Database Operations
-
-### User Management
-
-When using the database backend:
-- Temporary user IDs are generated if not provided
-- User memories are isolated from each other
-- Cleanup operations can remove old temporary users
-
-### Memory Persistence
-
-- Memories are automatically saved to the database
-- Unsaved memories can be flushed manually
-- Search operations use vector similarity matching
-
-## Error Handling
-
-The memory CLI includes comprehensive error handling for:
-- Database connection issues
-- Missing user IDs
-- Invalid search queries
-- File system permissions (file backend)
-- API connectivity for embeddings
-
-## Integration
-
-The memory system integrates with:
-- Main Talos agent for conversation history
-- All CLI modules for persistent context
-- Database cleanup operations
-- User management system
diff --git a/docs/cli/overview.md b/docs/cli/overview.md
deleted file mode 100644
index d5f09b3b..00000000
--- a/docs/cli/overview.md
+++ /dev/null
@@ -1,250 +0,0 @@
-# CLI Overview
-
-The Talos CLI is the main entry point for interacting with the Talos agent. It provides both interactive and non-interactive modes for different use cases.
-
-## Installation
-
-The CLI is installed as part of the `talos` package. After installation, you can run:
-
-```bash
-uv run talos
-```
-
-## Usage Modes
-
-### Interactive Mode
-
-To enter interactive mode, run `talos` without any arguments:
-
-```bash
-uv run talos
-```
-
-This starts a continuous conversation where you can:
-- Ask questions about protocol management
-- Request analysis and recommendations
-- Execute commands and workflows
-- Get help and guidance
-
-Example session:
-```
->> What are your main capabilities?
->> Analyze the sentiment around "DeFi protocols" on Twitter
->> Help me evaluate a governance proposal
->> exit
-```
-
-Type `exit` to quit the interactive session.
-
-### Non-Interactive Mode
-
-In non-interactive mode, you can run a single query and the agent will exit:
-
-```bash
-uv run talos "your query here"
-```
-
-Examples:
-```bash
-uv run talos "What is the current market sentiment?"
-uv run talos "Analyze the latest governance proposal"
-uv run talos "Check GitHub PRs for security issues"
-```
-
-### Daemon Mode
-
-Run Talos continuously for scheduled operations and automated tasks:
-
-```bash
-uv run talos daemon
-```
-
-The daemon mode:
-- Executes scheduled jobs automatically
-- Monitors for new proposals and PRs
-- Performs continuous market analysis
-- Handles automated responses and alerts
-- Can be gracefully shutdown with SIGTERM or SIGINT
-
-## Command Structure
-
-The Talos CLI uses a hierarchical command structure:
-
-```
-talos [global-options] [command-options] [arguments]
-```
-
-### Global Options
-
-- `--help, -h` - Show help information
-- `--version` - Show version information
-- `--config` - Specify configuration file path
-- `--verbose, -v` - Enable verbose logging
-
-### Available Commands
-
-| Command | Description |
-|---------|-------------|
-| `twitter` | Twitter-related operations and sentiment analysis |
-| `github` | GitHub repository management and PR reviews |
-| `proposals` | Governance proposal evaluation |
-| `memory` | Memory management and search operations |
-| `arbiscan` | Arbitrum blockchain contract source code retrieval |
-| `generate-keys` | Generate RSA key pairs for encryption |
-| `get-public-key` | Retrieve the current public key |
-| `encrypt` | Encrypt data using public key |
-| `decrypt` | Decrypt data using private key |
-| `daemon` | Run in continuous daemon mode |
-| `cleanup-users` | Clean up temporary users and conversation data |
-| `db-stats` | Show database statistics |
-
-## Environment Variables
-
-### Required Variables
-
-```bash
-export OPENAI_API_KEY="your-openai-api-key"
-export PINATA_API_KEY="your-pinata-api-key"
-export PINATA_SECRET_API_KEY="your-pinata-secret-api-key"
-```
-
-### Optional Variables
-
-```bash
-export GITHUB_API_TOKEN="your-github-token" # For GitHub operations
-export TWITTER_BEARER_TOKEN="your-twitter-token" # For Twitter analysis
-export GITHUB_REPO="owner/repo" # Default repository
-export ARBISCAN_API_KEY="your-arbiscan-key" # For higher rate limits on Arbitrum data
-```
-
-## Configuration
-
-### Configuration File
-
-Talos can be configured using a YAML configuration file:
-
-```yaml
-# talos.yml
-api_keys:
- openai: "${OPENAI_API_KEY}"
- github: "${GITHUB_API_TOKEN}"
- twitter: "${TWITTER_BEARER_TOKEN}"
-
-defaults:
- github_repo: "owner/repo"
- twitter_query_limit: 100
-
-logging:
- level: "INFO"
- file: "talos.log"
-
-hypervisor:
- approval_timeout: 30
- max_pending_actions: 100
-```
-
-Specify the configuration file:
-```bash
-uv run talos --config talos.yml
-```
-
-### Environment File
-
-Create a `.env` file for convenience:
-
-```bash
-# .env
-OPENAI_API_KEY=your-openai-api-key
-PINATA_API_KEY=your-pinata-api-key
-PINATA_SECRET_API_KEY=your-pinata-secret-api-key
-GITHUB_API_TOKEN=your-github-token
-TWITTER_BEARER_TOKEN=your-twitter-bearer-token
-GITHUB_REPO=owner/repo
-```
-
-## Error Handling
-
-The CLI includes comprehensive error handling for:
-
-- **Missing API Keys** - Clear messages about required environment variables
-- **Network Issues** - Retry logic and timeout handling
-- **Invalid Commands** - Helpful suggestions for correct usage
-- **Permission Errors** - Guidance on required permissions
-- **Rate Limiting** - Automatic backoff and retry strategies
-
-## Logging
-
-### Log Levels
-
-- `DEBUG` - Detailed debugging information
-- `INFO` - General information about operations
-- `WARNING` - Warning messages about potential issues
-- `ERROR` - Error messages for failed operations
-- `CRITICAL` - Critical errors that may stop execution
-
-### Log Configuration
-
-```bash
-# Set log level
-export TALOS_LOG_LEVEL=DEBUG
-
-# Set log file
-export TALOS_LOG_FILE=talos.log
-
-# Enable verbose output
-uv run talos --verbose
-```
-
-## Getting Help
-
-### Command Help
-
-Get help for any command:
-
-```bash
-uv run talos --help
-uv run talos twitter --help
-uv run talos github --help
-```
-
-### Interactive Help
-
-In interactive mode, you can ask for help:
-
-```
->> help
->> what commands are available?
->> how do I analyze Twitter sentiment?
-```
-
-### Documentation
-
-- **CLI Reference** - Detailed command documentation
-- **Examples** - Common usage patterns and workflows
-- **Troubleshooting** - Solutions for common issues
-- **API Reference** - Technical details about the underlying APIs
-
-## Best Practices
-
-### Security
-
-- **Environment Variables** - Use environment variables for API keys
-- **File Permissions** - Secure configuration files with appropriate permissions
-- **Key Rotation** - Regularly rotate API keys and tokens
-- **Audit Logs** - Monitor CLI usage through log files
-
-### Performance
-
-- **Batch Operations** - Use batch commands when possible
-- **Caching** - Enable caching for frequently accessed data
-- **Rate Limiting** - Respect API rate limits to avoid throttling
-- **Resource Management** - Monitor memory and CPU usage
-
-### Workflow Integration
-
-- **Scripting** - Use non-interactive mode for automation
-- **CI/CD Integration** - Integrate with continuous integration pipelines
-- **Monitoring** - Set up alerts for daemon mode operations
-- **Backup** - Regular backup of configuration and data files
-
-This CLI provides a powerful interface for managing decentralized protocols through the Talos AI agent system.
diff --git a/docs/cli/proposals.md b/docs/cli/proposals.md
deleted file mode 100644
index b20d4841..00000000
--- a/docs/cli/proposals.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# Proposals CLI
-
-The proposals CLI module provides commands for evaluating governance proposals using AI analysis.
-
-## Commands
-
-### `eval` - Evaluate Proposal
-
-Evaluates a governance proposal from a file using AI analysis.
-
-```bash
-uv run talos proposals eval --file
-```
-
-**Arguments:**
-- `--file, -f`: Path to the proposal file (required)
-
-**Options:**
-- `--model-name`: LLM model to use (default: "gpt-5")
-- `--temperature`: Temperature for LLM generation (default: 0.0)
-
-## Usage Examples
-
-### Basic Proposal Evaluation
-
-```bash
-# Evaluate a proposal from a text file
-uv run talos proposals eval --file governance_proposal.txt
-
-# Use a different model
-uv run talos proposals eval --file proposal.md --model-name gpt-5 --temperature 0.1
-```
-
-### Proposal File Format
-
-The proposal file should contain the full text of the governance proposal. Supported formats include:
-
-- Plain text (.txt)
-- Markdown (.md)
-- Any text-based format
-
-Example proposal file content:
-```
-# Governance Proposal: Treasury Allocation
-
-## Summary
-This proposal requests allocation of 100,000 tokens from the treasury for development funding.
-
-## Details
-The funds will be used for:
-1. Core development team salaries
-2. Security audits
-3. Infrastructure costs
-
-## Timeline
-- Phase 1: 30 days
-- Phase 2: 60 days
-- Phase 3: 90 days
-
-## Budget Breakdown
-- Development: 60,000 tokens
-- Security: 25,000 tokens
-- Infrastructure: 15,000 tokens
-```
-
-## Output
-
-The command provides a comprehensive analysis including:
-
-- **Summary**: Brief overview of the proposal
-- **Risk Assessment**: Potential risks and concerns
-- **Benefits Analysis**: Expected benefits and outcomes
-- **Recommendation**: AI-generated recommendation (approve/reject/modify)
-- **Reasoning**: Detailed explanation of the recommendation
-
-## Environment Variables
-
-- `OPENAI_API_KEY`: Required for AI analysis functionality
-
-## Error Handling
-
-The command includes error handling for:
-- Missing or invalid file paths
-- File reading permissions
-- API connectivity issues
-- Invalid proposal formats
-
-## Integration
-
-The proposals CLI integrates with the main Talos agent system and can be used as part of automated governance workflows or manual proposal review processes.
diff --git a/docs/cli/twitter.md b/docs/cli/twitter.md
deleted file mode 100644
index 706ad01e..00000000
--- a/docs/cli/twitter.md
+++ /dev/null
@@ -1,375 +0,0 @@
-# Twitter Commands
-
-The Talos CLI provides comprehensive Twitter integration for sentiment analysis, community monitoring, and social media engagement.
-
-## Setup
-
-### Authentication
-
-Set your Twitter Bearer Token as an environment variable:
-
-```bash
-export TWITTER_BEARER_TOKEN=your_twitter_bearer_token_here
-```
-
-### API Access
-
-Twitter commands require:
-- Twitter API v2 access
-- Bearer Token with read permissions
-- Rate limiting awareness (300 requests per 15 minutes)
-
-## Commands
-
-### `get-user-prompt` - User Voice Analysis
-
-Analyze a Twitter user's general voice and communication style to generate a prompt that captures their personality.
-
-**Usage:**
-```bash
-uv run talos twitter get-user-prompt
-```
-
-**Arguments:**
-- `username`: Twitter username (without @ symbol)
-
-**Examples:**
-```bash
-# Analyze a specific user's communication style
-uv run talos twitter get-user-prompt elonmusk
-
-# Analyze multiple users
-uv run talos twitter get-user-prompt vitalikbuterin
-uv run talos twitter get-user-prompt naval
-```
-
-**Output:**
-```
-=== User Voice Analysis: @elonmusk ===
-
-Communication Style:
-- Direct and concise messaging
-- Technical depth with accessible explanations
-- Frequent use of humor and memes
-- Bold predictions and statements
-- Engineering-focused perspective
-
-Key Themes:
-- Technology and innovation
-- Space exploration and Mars colonization
-- Electric vehicles and sustainable energy
-- AI development and safety
-- Manufacturing and production efficiency
-
-Tone Characteristics:
-- Confident and assertive
-- Occasionally provocative
-- Optimistic about technology
-- Critical of bureaucracy
-- Supportive of free speech
-
-Generated Prompt:
-"Communicate with the direct, confident style of a tech innovator.
-Be concise but technically accurate. Use accessible language to
-explain complex concepts. Show optimism about technological
-progress while being realistic about challenges. Occasionally
-use humor to make points more memorable."
-```
-
-### `get-query-sentiment` - Sentiment Analysis
-
-Analyze sentiment around specific topics, keywords, or phrases on Twitter.
-
-**Usage:**
-```bash
-uv run talos twitter get-query-sentiment ""
-```
-
-**Arguments:**
-- `query`: Search query or topic to analyze
-
-**Options:**
-- `--limit`: Number of tweets to analyze (default: 100, max: 1000)
-- `--days`: Number of days to look back (default: 7, max: 30)
-- `--lang`: Language filter (default: en)
-
-**Examples:**
-```bash
-# Basic sentiment analysis
-uv run talos twitter get-query-sentiment "DeFi yield farming"
-
-# Extended analysis with more tweets
-uv run talos twitter get-query-sentiment "Ethereum staking" --limit 500
-
-# Recent sentiment (last 24 hours)
-uv run talos twitter get-query-sentiment "crypto market" --days 1
-
-# Multi-language analysis
-uv run talos twitter get-query-sentiment "Bitcoin" --lang all
-```
-
-**Output:**
-```
-=== Sentiment Analysis: "DeFi yield farming" ===
-
-Overall Sentiment: MIXED (Slightly Positive)
-Confidence: 78%
-
-Sentiment Distribution:
-🟢 Positive: 45% (450 tweets)
-🟡 Neutral: 32% (320 tweets)
-🔴 Negative: 23% (230 tweets)
-
-Key Themes:
-Positive Sentiments:
-- High APY opportunities (mentioned 156 times)
-- New protocol launches (mentioned 89 times)
-- Successful farming strategies (mentioned 67 times)
-
-Negative Sentiments:
-- Impermanent loss concerns (mentioned 78 times)
-- Rug pull warnings (mentioned 45 times)
-- Gas fee complaints (mentioned 34 times)
-
-Influential Voices:
-@defi_analyst (50k followers): "Yield farming still profitable with right strategy"
-@crypto_researcher (25k followers): "Be careful of new farms, many are unsustainable"
-
-Trending Hashtags:
-#DeFi (mentioned 234 times)
-#YieldFarming (mentioned 189 times)
-#APY (mentioned 156 times)
-
-Recommendations:
-- Monitor impermanent loss discussions for user concerns
-- Address gas fee issues in communications
-- Highlight sustainable yield strategies
-- Engage with influential voices sharing positive content
-```
-
-## Advanced Usage
-
-### Sentiment Monitoring
-
-Set up continuous monitoring for important topics:
-
-```bash
-#!/bin/bash
-# sentiment-monitor.sh
-
-topics=("our_protocol" "DeFi governance" "yield farming" "staking rewards")
-
-for topic in "${topics[@]}"; do
- echo "Monitoring: $topic"
- uv run talos twitter get-query-sentiment "$topic" --limit 200
- echo "---"
-done
-```
-
-### Competitor Analysis
-
-Monitor sentiment around competitor protocols:
-
-```bash
-#!/bin/bash
-# competitor-sentiment.sh
-
-competitors=("Compound" "Aave" "Uniswap" "SushiSwap")
-
-for competitor in "${competitors[@]}"; do
- echo "=== $competitor Sentiment ==="
- uv run talos twitter get-query-sentiment "$competitor protocol" --limit 300
- echo ""
-done
-```
-
-### Influencer Tracking
-
-Analyze key influencers in your space:
-
-```bash
-#!/bin/bash
-# influencer-analysis.sh
-
-influencers=("vitalikbuterin" "haydenzadams" "stanikulechov" "rleshner")
-
-for influencer in "${influencers[@]}"; do
- echo "=== @$influencer Analysis ==="
- uv run talos twitter get-user-prompt "$influencer"
- echo ""
-done
-```
-
-## Integration with Protocol Management
-
-### APR Adjustment Based on Sentiment
-
-```bash
-#!/bin/bash
-# apr-sentiment-adjustment.sh
-
-# Get current sentiment about yield farming
-sentiment=$(uv run talos twitter get-query-sentiment "yield farming APR" --format=json)
-
-# Extract sentiment score
-score=$(echo $sentiment | jq '.sentiment_score')
-
-# Adjust APR based on sentiment
-if [ $score -gt 0.7 ]; then
- echo "Positive sentiment detected. Consider maintaining or slightly increasing APR."
-elif [ $score -lt 0.3 ]; then
- echo "Negative sentiment detected. Consider increasing APR to attract users."
-else
- echo "Neutral sentiment. Monitor closely for changes."
-fi
-```
-
-### Community Response Strategy
-
-```bash
-#!/bin/bash
-# community-response.sh
-
-# Monitor mentions of our protocol
-mentions=$(uv run talos twitter get-query-sentiment "our_protocol_name" --limit 500)
-
-# Check for negative sentiment spikes
-negative_ratio=$(echo $mentions | jq '.negative_ratio')
-
-if [ $(echo "$negative_ratio > 0.4" | bc) -eq 1 ]; then
- echo "High negative sentiment detected!"
- echo "Recommended actions:"
- echo "1. Investigate main concerns"
- echo "2. Prepare community response"
- echo "3. Consider protocol adjustments"
-
- # Get specific concerns
- uv run talos twitter get-query-sentiment "our_protocol_name problems" --limit 100
-fi
-```
-
-## Configuration
-
-### Rate Limiting
-
-Configure rate limiting to respect Twitter API limits:
-
-```yaml
-twitter:
- rate_limiting:
- requests_per_window: 300
- window_minutes: 15
- backoff_strategy: "exponential"
-
- analysis:
- default_tweet_limit: 100
- max_tweet_limit: 1000
- default_days_back: 7
- max_days_back: 30
-```
-
-### Sentiment Thresholds
-
-Configure sentiment analysis thresholds:
-
-```yaml
-twitter:
- sentiment:
- positive_threshold: 0.6
- negative_threshold: 0.4
- confidence_threshold: 0.7
-
- alerts:
- negative_spike_threshold: 0.5
- volume_spike_threshold: 200
- influencer_mention_threshold: 10000 # follower count
-```
-
-## Error Handling
-
-### Common Issues
-
-**Rate Limiting:**
-```
-Error: Rate limit exceeded
-Solution: Wait 15 minutes or reduce query frequency
-```
-
-**Invalid Bearer Token:**
-```
-Error: Twitter API authentication failed
-Solution: Check TWITTER_BEARER_TOKEN environment variable
-```
-
-**No Results:**
-```
-Warning: No tweets found for query "very_specific_term"
-Solution: Try broader search terms or increase time range
-```
-
-**API Quota Exceeded:**
-```
-Error: Monthly API quota exceeded
-Solution: Upgrade Twitter API plan or wait for quota reset
-```
-
-### Automatic Handling
-
-Talos automatically handles:
-- **Rate Limiting** - Waits and retries when limits are reached
-- **Network Errors** - Retries with exponential backoff
-- **Partial Results** - Returns available data when some requests fail
-- **Invalid Queries** - Suggests alternative search terms
-
-## Best Practices
-
-### Effective Queries
-
-**Use Specific Terms:**
-```bash
-# Good: Specific and relevant
-uv run talos twitter get-query-sentiment "Ethereum staking rewards"
-
-# Poor: Too broad
-uv run talos twitter get-query-sentiment "crypto"
-```
-
-**Include Context:**
-```bash
-# Good: Includes protocol context
-uv run talos twitter get-query-sentiment "Compound lending rates"
-
-# Good: Includes sentiment context
-uv run talos twitter get-query-sentiment "DeFi security concerns"
-```
-
-### Monitoring Strategy
-
-**Regular Monitoring:**
-- Daily sentiment checks for your protocol
-- Weekly competitor analysis
-- Monthly influencer voice updates
-
-**Alert-Based Monitoring:**
-- Set up alerts for negative sentiment spikes
-- Monitor for unusual volume increases
-- Track mentions by high-influence accounts
-
-### Data Interpretation
-
-**Consider Context:**
-- Market conditions affect overall sentiment
-- News events can cause temporary sentiment shifts
-- Bot activity may skew results
-
-**Look for Trends:**
-- Focus on sentiment trends over time
-- Compare relative sentiment between topics
-- Identify recurring themes and concerns
-
-**Validate Insights:**
-- Cross-reference with other data sources
-- Verify with community feedback
-- Test sentiment-based decisions carefully
-
-The Twitter integration provides powerful tools for understanding community sentiment and making data-driven decisions about protocol management and community engagement.
diff --git a/docs/development/code-style.md b/docs/development/code-style.md
deleted file mode 100644
index 64452f87..00000000
--- a/docs/development/code-style.md
+++ /dev/null
@@ -1,543 +0,0 @@
-# Code Style Guide
-
-This document outlines the code style guidelines for the Talos project. Following these guidelines ensures consistency, readability, and maintainability across the codebase.
-
-## Python Code Style
-
-### PEP 8 Compliance
-
-All Python code must follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) guidelines:
-
-- Use 4 spaces for indentation (no tabs)
-- Keep lines under 88 characters long
-- Use lowercase with underscores for function and variable names
-- Use CamelCase for class names
-- Use UPPER_CASE for constants
-
-### Type Hints
-
-Use modern Python type hints consistently:
-
-```python
-# Good - Modern type hints
-def process_items(items: list[str]) -> dict[str, int]:
- return {item: len(item) for item in items}
-
-def get_user_data(user_id: int) -> dict[str, Any] | None:
- return database.get_user(user_id)
-
-# Bad - Old-style type hints
-from typing import List, Dict, Optional
-
-def process_items(items: List[str]) -> Dict[str, int]:
- return {item: len(item) for item in items}
-
-def get_user_data(user_id: int) -> Optional[Dict[str, Any]]:
- return database.get_user(user_id)
-```
-
-### Type Hint Guidelines
-
-- **Never use quotes** around type hints unless absolutely necessary
-- Use `from __future__ import annotations` if you need forward references
-- Provide type hints for all function signatures
-- Use `Any` sparingly and document why it's necessary
-
-```python
-# Good
-from __future__ import annotations
-
-def create_agent(config: AgentConfig) -> Agent:
- return Agent(config)
-
-# Bad
-def create_agent(config: "AgentConfig") -> "Agent":
- return Agent(config)
-```
-
-### Import Organization
-
-Organize imports into three sections with blank lines between them:
-
-```python
-# Standard library imports
-import os
-import sys
-from datetime import datetime, timedelta
-from pathlib import Path
-
-# Third-party imports
-import requests
-from pydantic import BaseModel, Field
-from openai import OpenAI
-
-# First-party imports
-from talos.core.agent import Agent
-from talos.core.memory import Memory
-from talos.utils.helpers import format_response
-```
-
-### Docstrings
-
-Use Google-style docstrings for all modules, classes, and functions:
-
-```python
-def analyze_sentiment(
- text: str,
- model: str = "gpt-5",
- confidence_threshold: float = 0.7
-) -> SentimentResult:
- """Analyze sentiment of the given text using an LLM.
-
- This function processes text through a language model to determine
- sentiment polarity and confidence scores.
-
- Args:
- text: The text to analyze for sentiment. Must not be empty.
- model: The LLM model to use for analysis. Defaults to "gpt-5".
- confidence_threshold: Minimum confidence score to return results.
- Must be between 0.0 and 1.0.
-
- Returns:
- SentimentResult containing polarity score (-1.0 to 1.0) and
- confidence score (0.0 to 1.0).
-
- Raises:
- ValueError: If text is empty or confidence_threshold is invalid.
- APIError: If the LLM service is unavailable.
-
- Example:
- >>> result = analyze_sentiment("I love this protocol!")
- >>> print(f"Sentiment: {result.polarity}, Confidence: {result.confidence}")
- Sentiment: 0.8, Confidence: 0.95
- """
- if not text.strip():
- raise ValueError("Text cannot be empty")
-
- if not 0.0 <= confidence_threshold <= 1.0:
- raise ValueError("Confidence threshold must be between 0.0 and 1.0")
-
- # Implementation here
- return SentimentResult(polarity=0.8, confidence=0.95)
-```
-
-## Pydantic Models
-
-### Model Configuration
-
-Use `ConfigDict` for model-specific configuration:
-
-```python
-from pydantic import BaseModel, ConfigDict, Field
-
-class AgentConfig(BaseModel):
- model_config = ConfigDict(
- arbitrary_types_allowed=True,
- validate_assignment=True,
- extra='forbid'
- )
-
- model_name: str = Field(default="gpt-5", description="LLM model to use")
- temperature: float = Field(default=0.7, ge=0.0, le=2.0)
- max_tokens: int = Field(default=1000, gt=0)
-```
-
-### Post-Initialization Logic
-
-Use `model_post_init` instead of overriding `__init__`:
-
-```python
-class Agent(BaseModel):
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- name: str
- model: str = "gpt-5"
- memory: Memory | None = None
-
- def model_post_init(self, __context: Any) -> None:
- """Initialize memory after model creation."""
- if self.memory is None:
- self.memory = Memory(agent_name=self.name)
-```
-
-### Field Validation
-
-Use Pydantic validators for complex validation:
-
-```python
-from pydantic import BaseModel, field_validator, model_validator
-
-class TwitterQuery(BaseModel):
- query: str
- limit: int = 100
- days_back: int = 7
-
- @field_validator('query')
- @classmethod
- def validate_query(cls, v: str) -> str:
- if not v.strip():
- raise ValueError('Query cannot be empty')
- if len(v) > 500:
- raise ValueError('Query too long (max 500 characters)')
- return v.strip()
-
- @field_validator('limit')
- @classmethod
- def validate_limit(cls, v: int) -> int:
- if not 1 <= v <= 1000:
- raise ValueError('Limit must be between 1 and 1000')
- return v
-
- @model_validator(mode='after')
- def validate_model(self) -> 'TwitterQuery':
- if self.days_back > 30 and self.limit > 100:
- raise ValueError('Cannot use high limit with long time range')
- return self
-```
-
-## Error Handling
-
-### Exception Hierarchy
-
-Create custom exceptions for different error types:
-
-```python
-class TalosError(Exception):
- """Base exception for all Talos errors."""
- pass
-
-class ConfigurationError(TalosError):
- """Raised when configuration is invalid."""
- pass
-
-class APIError(TalosError):
- """Raised when external API calls fail."""
-
- def __init__(self, message: str, status_code: int | None = None):
- super().__init__(message)
- self.status_code = status_code
-
-class ValidationError(TalosError):
- """Raised when input validation fails."""
- pass
-```
-
-### Error Handling Patterns
-
-Use specific exception handling and provide helpful error messages:
-
-```python
-def fetch_twitter_data(query: str) -> list[dict[str, Any]]:
- """Fetch Twitter data with proper error handling."""
- try:
- response = twitter_client.search(query)
- return response.data
- except requests.HTTPError as e:
- if e.response.status_code == 429:
- raise APIError(
- "Twitter API rate limit exceeded. Please try again later.",
- status_code=429
- ) from e
- elif e.response.status_code == 401:
- raise APIError(
- "Twitter API authentication failed. Check your bearer token.",
- status_code=401
- ) from e
- else:
- raise APIError(f"Twitter API error: {e}", status_code=e.response.status_code) from e
- except requests.RequestException as e:
- raise APIError(f"Network error while fetching Twitter data: {e}") from e
- except Exception as e:
- raise TalosError(f"Unexpected error fetching Twitter data: {e}") from e
-```
-
-## Logging
-
-### Logger Configuration
-
-Use structured logging with appropriate levels:
-
-```python
-import logging
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-class Agent:
- def __init__(self, name: str):
- self.name = name
- self.logger = logging.getLogger(f"{__name__}.{name}")
-
- def process_query(self, query: str) -> QueryResponse:
- self.logger.info("Processing query", extra={
- "agent_name": self.name,
- "query_length": len(query),
- "query_hash": hash(query)
- })
-
- try:
- result = self._execute_query(query)
- self.logger.info("Query processed successfully", extra={
- "agent_name": self.name,
- "response_length": len(str(result))
- })
- return result
- except Exception as e:
- self.logger.error("Query processing failed", extra={
- "agent_name": self.name,
- "error": str(e),
- "error_type": type(e).__name__
- }, exc_info=True)
- raise
-```
-
-### Log Levels
-
-Use appropriate log levels:
-
-- **DEBUG** - Detailed information for debugging
-- **INFO** - General information about program execution
-- **WARNING** - Something unexpected happened but the program continues
-- **ERROR** - A serious problem occurred
-- **CRITICAL** - A very serious error occurred
-
-## Testing Style
-
-### Test Organization
-
-Organize tests to mirror the source code structure:
-
-```
-tests/
-├── unit/
-│ ├── core/
-│ │ ├── test_agent.py
-│ │ └── test_memory.py
-│ ├── skills/
-│ │ └── test_sentiment.py
-│ └── services/
-│ └── test_yield_manager.py
-├── integration/
-│ ├── test_github_integration.py
-│ └── test_twitter_integration.py
-└── e2e/
- └── test_full_workflow.py
-```
-
-### Test Naming
-
-Use descriptive test names that explain the scenario:
-
-```python
-def test_agent_processes_simple_query_successfully():
- """Test that agent can process a simple query and return valid response."""
- pass
-
-def test_agent_raises_error_when_query_is_empty():
- """Test that agent raises ValidationError when given empty query."""
- pass
-
-def test_sentiment_analysis_returns_positive_score_for_positive_text():
- """Test that sentiment analysis correctly identifies positive sentiment."""
- pass
-```
-
-### Test Structure
-
-Follow the Arrange-Act-Assert pattern:
-
-```python
-def test_memory_stores_and_retrieves_data():
- # Arrange
- memory = Memory(agent_name="test_agent")
- test_data = "This is a test memory"
- metadata = {"type": "test", "importance": "high"}
-
- # Act
- memory.add_memory(test_data, metadata)
- results = memory.search("test memory", limit=1)
-
- # Assert
- assert len(results) == 1
- assert results[0].description == test_data
- assert results[0].metadata == metadata
-```
-
-### Fixtures and Mocking
-
-Use fixtures for common test setup:
-
-```python
-import pytest
-from unittest.mock import Mock, patch
-
-@pytest.fixture
-def mock_openai_client():
- """Mock OpenAI client for testing."""
- client = Mock()
- client.chat.completions.create.return_value = Mock(
- choices=[Mock(message=Mock(content="Test response"))]
- )
- return client
-
-@pytest.fixture
-def test_agent(mock_openai_client):
- """Create test agent with mocked dependencies."""
- with patch('talos.core.agent.OpenAI', return_value=mock_openai_client):
- return Agent(name="test_agent", model="gpt-5")
-
-def test_agent_generates_response(test_agent):
- """Test that agent generates appropriate response."""
- response = test_agent.process_query("What is the weather?")
- assert response is not None
- assert isinstance(response, QueryResponse)
-```
-
-## Performance Guidelines
-
-### Memory Management
-
-Write memory-efficient code:
-
-```python
-# Good - Use generators for large datasets
-def process_large_dataset(data_source: str) -> Iterator[ProcessedItem]:
- """Process large dataset efficiently using generators."""
- with open(data_source) as file:
- for line in file:
- yield process_line(line)
-
-# Good - Use context managers for resource cleanup
-def analyze_file(file_path: str) -> AnalysisResult:
- """Analyze file with proper resource management."""
- with open(file_path) as file:
- content = file.read()
- return analyze_content(content)
-
-# Bad - Loading entire dataset into memory
-def process_large_dataset_bad(data_source: str) -> list[ProcessedItem]:
- with open(data_source) as file:
- all_lines = file.readlines() # Loads entire file into memory
- return [process_line(line) for line in all_lines]
-```
-
-### Async Programming
-
-Use async/await for I/O-bound operations:
-
-```python
-import asyncio
-import aiohttp
-from typing import AsyncIterator
-
-async def fetch_multiple_urls(urls: list[str]) -> list[dict[str, Any]]:
- """Fetch multiple URLs concurrently."""
- async with aiohttp.ClientSession() as session:
- tasks = [fetch_url(session, url) for url in urls]
- results = await asyncio.gather(*tasks, return_exceptions=True)
- return [r for r in results if not isinstance(r, Exception)]
-
-async def fetch_url(session: aiohttp.ClientSession, url: str) -> dict[str, Any]:
- """Fetch single URL with error handling."""
- try:
- async with session.get(url) as response:
- return await response.json()
- except Exception as e:
- logger.error(f"Failed to fetch {url}: {e}")
- raise
-```
-
-## Security Guidelines
-
-### Input Validation
-
-Always validate and sanitize inputs:
-
-```python
-def process_user_query(query: str, user_id: int) -> QueryResponse:
- """Process user query with proper validation."""
- # Validate input parameters
- if not isinstance(query, str):
- raise ValidationError("Query must be a string")
-
- if not query.strip():
- raise ValidationError("Query cannot be empty")
-
- if len(query) > 10000:
- raise ValidationError("Query too long (max 10000 characters)")
-
- if not isinstance(user_id, int) or user_id <= 0:
- raise ValidationError("Invalid user ID")
-
- # Sanitize query to prevent injection attacks
- sanitized_query = sanitize_query(query)
-
- # Process the sanitized query
- return execute_query(sanitized_query, user_id)
-```
-
-### Secret Management
-
-Never hardcode secrets or API keys:
-
-```python
-import os
-from typing import Optional
-
-def get_api_key(service: str) -> str:
- """Get API key from environment variables."""
- key_name = f"{service.upper()}_API_KEY"
- api_key = os.getenv(key_name)
-
- if not api_key:
- raise ConfigurationError(f"Missing required environment variable: {key_name}")
-
- return api_key
-
-# Good - Use environment variables
-openai_key = get_api_key("openai")
-
-# Bad - Hardcoded secrets
-# openai_key = "sk-1234567890abcdef" # Never do this!
-```
-
-## Documentation Style
-
-### Code Comments
-
-Write clear, helpful comments:
-
-```python
-def calculate_optimal_apr(
- market_data: MarketData,
- sentiment_score: float,
- current_apr: float
-) -> float:
- """Calculate optimal APR based on market conditions and sentiment."""
-
- # Base APR adjustment based on market volatility
- # Higher volatility requires higher APR to attract users
- volatility_adjustment = market_data.volatility * 0.1
-
- # Sentiment adjustment: positive sentiment allows lower APR
- # Negative sentiment requires higher APR to maintain attractiveness
- sentiment_adjustment = (0.5 - sentiment_score) * 0.05
-
- # Calculate new APR with bounds checking
- new_apr = current_apr + volatility_adjustment + sentiment_adjustment
-
- # Ensure APR stays within reasonable bounds (1% to 20%)
- return max(0.01, min(0.20, new_apr))
-```
-
-### README and Documentation
-
-Keep documentation up to date and comprehensive:
-
-- Explain the purpose and scope of each module
-- Provide usage examples
-- Document configuration options
-- Include troubleshooting guides
-- Maintain API documentation
-
-Following these code style guidelines ensures that the Talos codebase remains clean, maintainable, and accessible to all contributors.
diff --git a/docs/development/contributing.md b/docs/development/contributing.md
deleted file mode 100644
index 32415871..00000000
--- a/docs/development/contributing.md
+++ /dev/null
@@ -1,416 +0,0 @@
-# Contributing
-
-Thank you for your interest in contributing to Talos! This guide will help you get started with contributing to the project.
-
-## Getting Started
-
-### Prerequisites
-
-- Python 3.8 or higher
-- `uv` package manager (recommended)
-- Git
-- Basic understanding of AI agents and DeFi protocols
-
-### Development Setup
-
-1. **Fork and clone the repository**:
- ```bash
- git clone https://github.com/your-username/talos.git
- cd talos
- ```
-
-2. **Create a virtual environment**:
- ```bash
- uv venv
- source .venv/bin/activate
- ```
-
-3. **Install dependencies**:
- ```bash
- ./scripts/install_deps.sh
- ```
-
-4. **Set up environment variables**:
- ```bash
- export OPENAI_API_KEY="your-openai-api-key"
- export PINATA_API_KEY="your-pinata-api-key"
- export PINATA_SECRET_API_KEY="your-pinata-secret-api-key"
- ```
-
-5. **Run tests to verify setup**:
- ```bash
- ./scripts/run_checks.sh
- ```
-
-## Development Workflow
-
-### Branch Management
-
-1. **Create a feature branch**:
- ```bash
- git checkout -b feature/your-feature-name
- ```
-
-2. **Make your changes** following the code style guidelines
-
-3. **Run pre-commit checks**:
- ```bash
- ./scripts/run_checks.sh
- ```
-
-4. **Commit your changes**:
- ```bash
- git add .
- git commit -m "feat: add your feature description"
- ```
-
-5. **Push and create a pull request**:
- ```bash
- git push origin feature/your-feature-name
- ```
-
-### Commit Message Format
-
-Follow conventional commit format:
-
-- `feat:` - New features
-- `fix:` - Bug fixes
-- `docs:` - Documentation changes
-- `style:` - Code style changes (formatting, etc.)
-- `refactor:` - Code refactoring
-- `test:` - Adding or updating tests
-- `chore:` - Maintenance tasks
-
-Examples:
-```
-feat: add sentiment analysis for Twitter data
-fix: resolve memory leak in agent initialization
-docs: update API documentation for new endpoints
-```
-
-## Code Quality Standards
-
-### Pre-commit Checks
-
-Before committing any changes, ensure you run the following checks:
-
-1. **Ruff** - Lint and format the code:
- ```bash
- uv run ruff check .
- uv run ruff format .
- ```
-
-2. **Mypy** - Type checking:
- ```bash
- uv run mypy src
- ```
-
-3. **Pytest** - Run all tests:
- ```bash
- uv run pytest
- ```
-
-### Automated Checks
-
-Run all checks at once:
-```bash
-./scripts/run_checks.sh
-```
-
-This script runs:
-- Ruff linting and formatting
-- Mypy type checking
-- Pytest test suite
-
-## Code Style Guidelines
-
-### Python Code Style
-
-- Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) for all Python code
-- Use modern Python type hints (`list` and `dict` instead of `List` and `Dict`)
-- Never use quotes around type hints
-- Use type hints for all function signatures
-- Write clear and concise docstrings for all modules, classes, and functions
-- Keep lines under 88 characters long
-
-### Type Hints
-
-```python
-# Good
-def process_data(items: list[str]) -> dict[str, int]:
- """Process a list of items and return counts."""
- return {item: len(item) for item in items}
-
-# Bad
-def process_data(items: "List[str]") -> "Dict[str, int]":
- return {item: len(item) for item in items}
-```
-
-### Pydantic Models
-
-When creating Pydantic `BaseModel`s:
-- Use `model_post_init` for post-initialization logic instead of overriding `__init__`
-- Use `ConfigDict` for model-specific configuration
-
-```python
-from pydantic import BaseModel, ConfigDict
-
-class MyModel(BaseModel):
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- name: str
- value: int
-
- def model_post_init(self, __context):
- # Post-initialization logic here
- pass
-```
-
-### Import Organization
-
-Always put imports at the top of the file, organized into sections:
-
-```python
-# Standard library imports
-import os
-import sys
-from datetime import datetime
-
-# Third-party imports
-import requests
-from pydantic import BaseModel
-
-# First-party imports
-from talos.core.agent import Agent
-from talos.utils.helpers import format_response
-```
-
-## Testing Guidelines
-
-### Writing Tests
-
-- Write tests for all new functionality
-- Use descriptive test names that explain what is being tested
-- Follow the Arrange-Act-Assert pattern
-- Use fixtures for common test setup
-
-```python
-def test_agent_processes_query_successfully():
- # Arrange
- agent = Agent(model="gpt-5")
- query = "What is the current market sentiment?"
-
- # Act
- result = agent.process_query(query)
-
- # Assert
- assert result is not None
- assert isinstance(result, QueryResponse)
- assert len(result.answers) > 0
-```
-
-### Test Categories
-
-- **Unit Tests** - Test individual functions and classes
-- **Integration Tests** - Test component interactions
-- **End-to-End Tests** - Test complete workflows
-- **Performance Tests** - Test performance characteristics
-
-### Running Tests
-
-```bash
-# Run all tests
-uv run pytest
-
-# Run specific test file
-uv run pytest tests/test_agent.py
-
-# Run with coverage
-uv run pytest --cov=src
-
-# Run tests matching pattern
-uv run pytest -k "test_sentiment"
-```
-
-## Documentation
-
-### Docstring Format
-
-Use Google-style docstrings:
-
-```python
-def analyze_sentiment(text: str, model: str = "gpt-5") -> float:
- """Analyze sentiment of the given text.
-
- Args:
- text: The text to analyze for sentiment.
- model: The LLM model to use for analysis.
-
- Returns:
- A sentiment score between -1.0 (negative) and 1.0 (positive).
-
- Raises:
- ValueError: If text is empty or model is not supported.
-
- Example:
- >>> score = analyze_sentiment("I love this protocol!")
- >>> print(f"Sentiment: {score}")
- Sentiment: 0.8
- """
- if not text.strip():
- raise ValueError("Text cannot be empty")
-
- # Implementation here
- return 0.0
-```
-
-### API Documentation
-
-- Document all public APIs
-- Include examples in docstrings
-- Update documentation when changing APIs
-- Use type hints consistently
-
-## Architecture Guidelines
-
-### Adding New Skills
-
-When adding new skills to Talos:
-
-1. **Inherit from base Skill class**:
- ```python
- from talos.skills.base import Skill
-
- class MyNewSkill(Skill):
- def run(self, **kwargs) -> QueryResponse:
- # Implementation
- pass
- ```
-
-2. **Add to MainAgent setup**:
- ```python
- # Skills are automatically registered in MainAgent._setup_skills_and_services()
- # Add your skill to the skills list in that method
- ```
-
-3. **Add comprehensive tests**
-4. **Update documentation**
-
-### Adding New Services
-
-When adding new services:
-
-1. **Inherit from base Service class**:
- ```python
- from talos.services.abstract.service import Service
-
- class MyNewService(Service):
- def process(self, request: ServiceRequest) -> ServiceResponse:
- # Implementation
- pass
- ```
-
-2. **Follow single responsibility principle**
-3. **Make services stateless when possible**
-4. **Add proper error handling**
-
-### Adding New Tools
-
-When adding new tools:
-
-1. **Inherit from BaseTool**
-2. **Wrap with SupervisedTool for security**
-3. **Add comprehensive error handling**
-4. **Document all parameters and return values**
-
-## Security Guidelines
-
-### API Keys and Secrets
-
-- Never commit API keys or secrets to the repository
-- Use environment variables for sensitive configuration
-- Add sensitive files to `.gitignore`
-- Use the secrets management system for production
-
-### Input Validation
-
-- Validate all user inputs
-- Sanitize data before processing
-- Use type hints and Pydantic models for validation
-- Handle edge cases gracefully
-
-### Error Handling
-
-- Don't expose sensitive information in error messages
-- Log errors appropriately for debugging
-- Provide helpful error messages to users
-- Use proper exception handling
-
-## Performance Guidelines
-
-### Memory Management
-
-- Use batch operations for memory-intensive tasks
-- Implement proper cleanup in destructors
-- Monitor memory usage in long-running processes
-- Use generators for large datasets
-
-### API Usage
-
-- Implement proper rate limiting
-- Use caching for frequently accessed data
-- Batch API calls when possible
-- Handle API errors gracefully
-
-## Getting Help
-
-### Resources
-
-- **Documentation** - Check the full documentation for detailed guides
-- **Issues** - Search existing issues before creating new ones
-- **Discussions** - Use GitHub Discussions for questions and ideas
-- **Code Review** - Request reviews from maintainers
-
-### Communication
-
-- Be respectful and constructive in all interactions
-- Provide clear descriptions of issues and proposed changes
-- Include relevant context and examples
-- Follow up on feedback and suggestions
-
-### Issue Reporting
-
-When reporting bugs:
-
-1. **Check existing issues** first
-2. **Provide clear reproduction steps**
-3. **Include relevant logs and error messages**
-4. **Specify your environment** (OS, Python version, etc.)
-5. **Use the issue template** if available
-
-### Feature Requests
-
-When requesting features:
-
-1. **Describe the use case** clearly
-2. **Explain the expected behavior**
-3. **Consider the impact** on existing functionality
-4. **Provide examples** if possible
-
-## Release Process
-
-### Version Management
-
-- Follow semantic versioning (SemVer)
-- Update version numbers in appropriate files
-- Create release notes for significant changes
-- Tag releases appropriately
-
-### Deployment
-
-- Ensure all tests pass before release
-- Update documentation for new features
-- Coordinate with maintainers for release timing
-- Monitor for issues after release
-
-Thank you for contributing to Talos! Your contributions help make decentralized protocol management more accessible and secure.
diff --git a/docs/development/performance.md b/docs/development/performance.md
deleted file mode 100644
index d5977b1e..00000000
--- a/docs/development/performance.md
+++ /dev/null
@@ -1,422 +0,0 @@
-# Performance Analysis and Optimization
-
-This document provides detailed analysis of performance issues identified in the Talos codebase and recommendations for optimization.
-
-## Executive Summary
-
-Performance analysis of the Talos AI agent codebase has identified several optimization opportunities ranging from high-impact file I/O bottlenecks to medium-impact caching opportunities. This document outlines the issues, their impact, and implementation strategies for improvement.
-
-## Identified Performance Issues
-
-### 1. Memory Management File I/O (HIGH IMPACT)
-
-**Location**: `src/talos/core/memory.py:58-70`
-
-**Issue**: Every memory addition triggers immediate file write operations, causing significant I/O overhead.
-
-```python
-def add_memory(self, description: str, metadata: Optional[dict] = None):
- # ... memory creation logic ...
- self.memories.append(memory)
- if self.index is None:
- self.index = IndexFlatL2(len(embedding))
- self.index.add(np.array([embedding], dtype=np.float32))
- self._save() # ← Immediate file write on every addition
-```
-
-**Impact**:
-- High latency for memory operations
-- Excessive disk I/O in memory-intensive workflows
-- Poor scalability for bulk memory additions
-
-**Solution**: Implement batched writes with configurable batch size and auto-flush on destruction.
-
-**Implementation**:
-```python
-class Memory:
- def __init__(self, batch_size: int = 10, auto_save: bool = True):
- self.batch_size = batch_size
- self.auto_save = auto_save
- self.pending_writes = 0
-
- def add_memory(self, description: str, metadata: Optional[dict] = None):
- # ... memory creation logic ...
- self.memories.append(memory)
- self.pending_writes += 1
-
- if self.pending_writes >= self.batch_size:
- self.flush()
-
- def flush(self):
- """Manually flush pending writes to disk."""
- if self.pending_writes > 0:
- self._save()
- self.pending_writes = 0
-
- def __del__(self):
- """Ensure data is saved on destruction."""
- if self.auto_save and self.pending_writes > 0:
- self.flush()
-```
-
-### 2. CLI History Management Redundancy (MEDIUM IMPACT)
-
-**Location**: `src/talos/cli/main.py:97-102`
-
-**Issue**: Manual history management with redundant message appending in interactive mode.
-
-```python
-result = main_agent.run(user_input, history=history)
-history.append(HumanMessage(content=user_input)) # ← Redundant append
-if isinstance(result, AIMessage):
- history.append(AIMessage(content=result.content)) # ← Manual management
-else:
- history.append(AIMessage(content=str(result)))
-```
-
-**Impact**:
-- Duplicated history management logic
-- Potential for history inconsistencies
-- Unnecessary memory usage in long conversations
-
-**Solution**: Leverage the agent's built-in history management instead of manual tracking.
-
-**Implementation**:
-```python
-def interactive_mode():
- main_agent = MainAgent()
-
- while True:
- user_input = input(">> ")
- if user_input.lower() == 'exit':
- break
-
- # Let the agent manage its own history
- result = main_agent.run(user_input)
- print(result.content if hasattr(result, 'content') else str(result))
-```
-
-### 3. GitHub API Repository Caching (MEDIUM IMPACT)
-
-**Location**: `src/talos/tools/github/tools.py`
-
-**Issue**: Repository objects are fetched repeatedly instead of being cached.
-
-```python
-def get_open_issues(self, user: str, project: str) -> list[dict[str, Any]]:
- repo = self._github.get_repo(f"{user}/{project}") # ← Repeated API call
- # ...
-
-def get_all_pull_requests(self, user: str, project: str, state: str = "open") -> list[dict[str, Any]]:
- repo = self._github.get_repo(f"{user}/{project}") # ← Same repo fetched again
- # ...
-```
-
-**Impact**:
-- Unnecessary API calls to GitHub
-- Increased latency for GitHub operations
-- Potential rate limiting issues
-
-**Solution**: Implement repository object caching with TTL expiration.
-
-**Implementation**:
-```python
-from functools import lru_cache
-from datetime import datetime, timedelta
-from typing import Dict, Tuple
-
-class GithubTools:
- def __init__(self):
- self._repo_cache: Dict[str, Tuple[Any, datetime]] = {}
- self._cache_ttl = timedelta(minutes=5)
-
- def _get_repo_cached(self, repo_name: str):
- """Get repository with caching."""
- now = datetime.now()
-
- if repo_name in self._repo_cache:
- repo, cached_time = self._repo_cache[repo_name]
- if now - cached_time < self._cache_ttl:
- return repo
-
- # Fetch fresh repository
- repo = self._github.get_repo(repo_name)
- self._repo_cache[repo_name] = (repo, now)
- return repo
-
- def get_open_issues(self, user: str, project: str) -> list[dict[str, Any]]:
- repo = self._get_repo_cached(f"{user}/{project}")
- # ... rest of implementation
-```
-
-### 4. Prompt Loading Without Caching (LOW IMPACT)
-
-**Location**: `src/talos/prompts/prompt_managers/file_prompt_manager.py:18-31`
-
-**Issue**: Prompts are loaded from files on every initialization without caching.
-
-```python
-def load_prompts(self) -> None:
- for filename in os.listdir(self.prompts_dir):
- if filename.endswith(".json"):
- with open(os.path.join(self.prompts_dir, filename)) as f: # ← File I/O on every load
- prompt_data = json.load(f)
-```
-
-**Impact**:
-- Repeated file I/O for static prompt data
-- Slower initialization times
-- Unnecessary disk access
-
-**Solution**: Implement prompt caching with file modification time checking.
-
-**Implementation**:
-```python
-import os
-import json
-from typing import Dict, Tuple
-from datetime import datetime
-
-class FilePromptManager:
- def __init__(self, prompts_dir: str):
- self.prompts_dir = prompts_dir
- self._prompt_cache: Dict[str, Tuple[dict, float]] = {}
-
- def load_prompts(self) -> None:
- for filename in os.listdir(self.prompts_dir):
- if filename.endswith(".json"):
- filepath = os.path.join(self.prompts_dir, filename)
- mtime = os.path.getmtime(filepath)
-
- # Check if file has been modified since last cache
- if filename in self._prompt_cache:
- cached_data, cached_mtime = self._prompt_cache[filename]
- if mtime <= cached_mtime:
- self.prompts[filename[:-5]] = cached_data
- continue
-
- # Load and cache the prompt
- with open(filepath) as f:
- prompt_data = json.load(f)
- self._prompt_cache[filename] = (prompt_data, mtime)
- self.prompts[filename[:-5]] = prompt_data
-```
-
-### 5. Tool Registration Inefficiency (LOW IMPACT)
-
-**Location**: `src/talos/core/main_agent.py:74-75`
-
-**Issue**: Tools are registered in loops without checking for duplicates efficiently.
-
-```python
-for skill in self.skills:
- tool_manager.register_tool(skill.create_ticket_tool()) # ← Potential duplicate registrations
-```
-
-**Impact**:
-- Potential duplicate tool registrations
-- Inefficient tool lookup
-- Memory overhead from duplicate tools
-
-**Solution**: Implement efficient duplicate checking or use set-based registration.
-
-**Implementation**:
-```python
-class ToolManager:
- def __init__(self):
- self._registered_tools: set[str] = set()
- self.tools: dict[str, BaseTool] = {}
-
- def register_tool(self, tool: BaseTool) -> bool:
- """Register tool, returning True if newly registered."""
- tool_id = f"{tool.__class__.__name__}_{hash(str(tool))}"
-
- if tool_id in self._registered_tools:
- return False # Already registered
-
- self._registered_tools.add(tool_id)
- self.tools[tool.name] = tool
- return True
-```
-
-## Optimization Priority
-
-1. **Memory Management File I/O** - Immediate implementation recommended
-2. **GitHub API Repository Caching** - High value for GitHub-heavy workflows
-3. **CLI History Management** - Improves interactive experience
-4. **Prompt Loading Caching** - Low overhead improvement
-5. **Tool Registration** - Minor optimization
-
-## Implementation Status
-
-✅ **Memory Management Optimization**: Implemented batched writes with configurable batch size
-- Added `batch_size` and `auto_save` parameters
-- Implemented `flush()` method for manual persistence
-- Added destructor to ensure data persistence
-- Maintains backward compatibility
-
-## Performance Monitoring
-
-### Metrics to Track
-
-**Memory Operations**:
-- Average memory addition latency
-- Batch write frequency
-- Memory usage over time
-- Disk I/O operations per minute
-
-**API Operations**:
-- GitHub API call frequency
-- Cache hit/miss ratios
-- API response times
-- Rate limit utilization
-
-**System Performance**:
-- CPU usage during operations
-- Memory consumption patterns
-- Disk I/O throughput
-- Network bandwidth usage
-
-### Monitoring Implementation
-
-```python
-import time
-import logging
-from functools import wraps
-from typing import Callable, Any
-
-class PerformanceMonitor:
- def __init__(self):
- self.metrics = {}
- self.logger = logging.getLogger(__name__)
-
- def time_operation(self, operation_name: str):
- """Decorator to time operations."""
- def decorator(func: Callable) -> Callable:
- @wraps(func)
- def wrapper(*args, **kwargs) -> Any:
- start_time = time.time()
- try:
- result = func(*args, **kwargs)
- duration = time.time() - start_time
- self._record_metric(operation_name, duration, "success")
- return result
- except Exception as e:
- duration = time.time() - start_time
- self._record_metric(operation_name, duration, "error")
- raise
- return wrapper
- return decorator
-
- def _record_metric(self, name: str, duration: float, status: str):
- """Record performance metric."""
- if name not in self.metrics:
- self.metrics[name] = []
-
- self.metrics[name].append({
- "duration": duration,
- "status": status,
- "timestamp": time.time()
- })
-
- self.logger.info(f"Operation {name} completed in {duration:.3f}s with status {status}")
-
-# Usage example
-monitor = PerformanceMonitor()
-
-class Memory:
- @monitor.time_operation("memory_add")
- def add_memory(self, description: str, metadata: Optional[dict] = None):
- # Implementation here
- pass
-```
-
-## Benchmarking
-
-### Performance Tests
-
-Create benchmarks to measure optimization effectiveness:
-
-```python
-import pytest
-import time
-from talos.core.memory import Memory
-
-class TestMemoryPerformance:
- def test_batch_write_performance(self):
- """Test that batch writes improve performance."""
- # Test without batching
- memory_no_batch = Memory(batch_size=1)
- start_time = time.time()
- for i in range(100):
- memory_no_batch.add_memory(f"Memory {i}")
- no_batch_time = time.time() - start_time
-
- # Test with batching
- memory_batch = Memory(batch_size=10)
- start_time = time.time()
- for i in range(100):
- memory_batch.add_memory(f"Memory {i}")
- memory_batch.flush() # Ensure all writes complete
- batch_time = time.time() - start_time
-
- # Batching should be significantly faster
- assert batch_time < no_batch_time * 0.5
-
- def test_memory_usage_scaling(self):
- """Test memory usage scales linearly with data."""
- import psutil
- import os
-
- process = psutil.Process(os.getpid())
- initial_memory = process.memory_info().rss
-
- memory = Memory(batch_size=50)
- for i in range(1000):
- memory.add_memory(f"Large memory entry {i} with lots of content")
-
- final_memory = process.memory_info().rss
- memory_increase = final_memory - initial_memory
-
- # Memory increase should be reasonable (less than 100MB for 1000 entries)
- assert memory_increase < 100 * 1024 * 1024
-```
-
-## Recommendations
-
-### Immediate Actions
-
-1. **Implement Memory Batching** - Deploy the batched memory writes immediately
-2. **Add Performance Monitoring** - Implement basic timing and metrics collection
-3. **Create Benchmarks** - Establish baseline performance measurements
-
-### Medium-term Improvements
-
-1. **GitHub API Caching** - Implement repository caching for GitHub operations
-2. **Prompt Caching** - Add file-based caching for prompt templates
-3. **Connection Pooling** - Implement connection pooling for external APIs
-
-### Long-term Optimizations
-
-1. **Async Operations** - Convert I/O-bound operations to async/await
-2. **Database Backend** - Consider replacing file-based storage with database
-3. **Distributed Caching** - Implement Redis or similar for multi-instance deployments
-
-### Testing Strategy
-
-All optimizations should be thoroughly tested with:
-- **Unit Tests** - Test new functionality in isolation
-- **Performance Benchmarks** - Compare before/after performance
-- **Integration Tests** - Ensure no regressions in functionality
-- **Memory Profiling** - Monitor memory usage patterns
-- **Load Testing** - Test performance under realistic workloads
-
-### Monitoring and Alerting
-
-Set up monitoring for:
-- **Performance Degradation** - Alert when operations exceed baseline times
-- **Memory Leaks** - Monitor for increasing memory usage over time
-- **API Rate Limits** - Track API usage to prevent rate limiting
-- **Error Rates** - Monitor for increased error rates after optimizations
-
-By implementing these performance optimizations, Talos will be able to handle larger workloads more efficiently while maintaining reliability and user experience.
diff --git a/docs/development/testing.md b/docs/development/testing.md
deleted file mode 100644
index d3c5e0b3..00000000
--- a/docs/development/testing.md
+++ /dev/null
@@ -1,633 +0,0 @@
-# Testing Guide
-
-This document provides comprehensive guidance on testing practices for the Talos project, including unit tests, integration tests, and end-to-end testing strategies.
-
-## Testing Philosophy
-
-Talos follows a comprehensive testing approach that ensures:
-- **Reliability** - All core functionality is thoroughly tested
-- **Security** - Security-critical components have extensive test coverage
-- **Performance** - Performance characteristics are validated through testing
-- **Maintainability** - Tests serve as documentation and prevent regressions
-
-## Test Structure
-
-### Directory Organization
-
-```
-tests/
-├── unit/ # Unit tests for individual components
-│ ├── core/
-│ │ ├── test_agent.py
-│ │ ├── test_memory.py
-│ │ └── test_main_agent.py
-│ ├── skills/
-│ │ ├── test_proposals.py
-│ │ ├── test_sentiment.py
-│ │ └── test_twitter_sentiment.py
-│ ├── services/
-│ │ ├── test_yield_manager.py
-│ │ └── test_github.py
-│ ├── tools/
-│ │ ├── test_twitter.py
-│ │ └── test_github_tools.py
-│ └── hypervisor/
-│ ├── test_hypervisor.py
-│ └── test_supervisor.py
-├── integration/ # Integration tests
-│ ├── test_agent_workflow.py
-│ ├── test_github_integration.py
-│ └── test_twitter_integration.py
-├── e2e/ # End-to-end tests
-│ ├── test_proposal_evaluation.py
-│ ├── test_sentiment_analysis.py
-│ └── test_cli_workflows.py
-├── performance/ # Performance tests
-│ ├── test_memory_performance.py
-│ └── test_api_performance.py
-├── fixtures/ # Test fixtures and data
-│ ├── sample_proposals.json
-│ ├── mock_twitter_data.json
-│ └── test_configs.yaml
-└── conftest.py # Pytest configuration and shared fixtures
-```
-
-## Running Tests
-
-### Basic Test Execution
-
-```bash
-# Run all tests
-uv run pytest
-
-# Run specific test file
-uv run pytest tests/unit/core/test_agent.py
-
-# Run tests matching pattern
-uv run pytest -k "test_sentiment"
-
-# Run tests with verbose output
-uv run pytest -v
-
-# Run tests with coverage
-uv run pytest --cov=src --cov-report=html
-```
-
-### Test Categories
-
-```bash
-# Run only unit tests
-uv run pytest tests/unit/
-
-# Run only integration tests
-uv run pytest tests/integration/
-
-# Run only end-to-end tests
-uv run pytest tests/e2e/
-
-# Run performance tests
-uv run pytest tests/performance/
-```
-
-### Continuous Testing
-
-```bash
-# Watch for changes and re-run tests
-uv run pytest-watch
-
-# Run tests in parallel
-uv run pytest -n auto
-```
-
-## Unit Testing
-
-### Test Structure
-
-Follow the Arrange-Act-Assert pattern:
-
-```python
-def test_agent_processes_query_successfully():
- # Arrange - Set up test data and dependencies
- agent = Agent(name="test_agent", model="gpt-5")
- query = "What is the current market sentiment?"
- expected_response_type = QueryResponse
-
- # Act - Execute the functionality being tested
- result = agent.process_query(query)
-
- # Assert - Verify the results
- assert result is not None
- assert isinstance(result, expected_response_type)
- assert len(result.answers) > 0
- assert result.answers[0].strip() != ""
-```
-
-### Mocking External Dependencies
-
-Use mocks for external services and APIs:
-
-```python
-import pytest
-from unittest.mock import Mock, patch, MagicMock
-from talos.core.agent import Agent
-from talos.tools.twitter_client import TwitterClient
-
-@pytest.fixture
-def mock_openai_client():
- """Mock OpenAI client for testing."""
- client = Mock()
- client.chat.completions.create.return_value = Mock(
- choices=[Mock(message=Mock(content="Test response from AI"))]
- )
- return client
-
-@pytest.fixture
-def mock_twitter_client():
- """Mock Twitter client for testing."""
- client = Mock(spec=TwitterClient)
- client.search_tweets.return_value = [
- {"text": "Positive tweet about DeFi", "user": {"followers_count": 1000}},
- {"text": "Negative tweet about protocols", "user": {"followers_count": 500}}
- ]
- return client
-
-def test_sentiment_analysis_with_mocked_twitter(mock_twitter_client):
- """Test sentiment analysis with mocked Twitter data."""
- with patch('talos.skills.twitter_sentiment.TwitterClient', return_value=mock_twitter_client):
- skill = TwitterSentimentSkill()
- result = skill.run(query="DeFi protocols", limit=10)
-
- assert result is not None
- assert isinstance(result, QueryResponse)
- mock_twitter_client.search_tweets.assert_called_once()
-```
-
-### Testing Error Conditions
-
-Test error handling and edge cases:
-
-```python
-def test_agent_raises_error_for_empty_query():
- """Test that agent raises appropriate error for empty query."""
- agent = Agent(name="test_agent")
-
- with pytest.raises(ValidationError, match="Query cannot be empty"):
- agent.process_query("")
-
-def test_agent_handles_api_timeout():
- """Test that agent handles API timeouts gracefully."""
- with patch('openai.OpenAI') as mock_openai:
- mock_openai.return_value.chat.completions.create.side_effect = TimeoutError("API timeout")
-
- agent = Agent(name="test_agent")
-
- with pytest.raises(APIError, match="API timeout"):
- agent.process_query("test query")
-```
-
-## Integration Testing
-
-### Testing Component Interactions
-
-Integration tests verify that components work together correctly:
-
-```python
-def test_agent_with_real_memory_integration():
- """Test agent with actual memory system integration."""
- # Create agent with real memory (not mocked)
- agent = Agent(name="integration_test_agent")
-
- # Add some memories
- agent.memory.add_memory("Previous conversation about DeFi", {"topic": "defi"})
- agent.memory.add_memory("Discussion about yield farming", {"topic": "yield"})
-
- # Query should use memory context
- result = agent.process_query("What did we discuss about DeFi before?")
-
- assert result is not None
- assert "DeFi" in str(result) or "defi" in str(result).lower()
-
-def test_hypervisor_with_supervised_tools():
- """Test hypervisor integration with supervised tools."""
- hypervisor = Hypervisor()
- github_tool = GithubTool()
- supervised_tool = SupervisedTool(github_tool, hypervisor.supervisor)
-
- # Test that tool execution goes through approval
- with patch.object(hypervisor.supervisor, 'approve_action') as mock_approve:
- mock_approve.return_value = ApprovalResult(approved=True, reason="Test approval")
-
- result = supervised_tool.execute("get_prs", repo="test/repo")
-
- mock_approve.assert_called_once()
- assert result is not None
-```
-
-### Database Integration Tests
-
-Test database operations with real database:
-
-```python
-@pytest.fixture
-def test_database():
- """Create test database for integration tests."""
- # Set up test database
- db_path = "test_memory.db"
- memory = Memory(db_path=db_path)
- yield memory
-
- # Cleanup after test
- import os
- if os.path.exists(db_path):
- os.remove(db_path)
-
-def test_memory_persistence_integration(test_database):
- """Test that memories persist across sessions."""
- memory = test_database
-
- # Add memory in first session
- memory.add_memory("Test memory for persistence", {"session": "first"})
- memory.flush()
-
- # Create new memory instance (simulating new session)
- new_memory = Memory(db_path=memory.db_path)
-
- # Search should find the persisted memory
- results = new_memory.search("Test memory", limit=1)
- assert len(results) == 1
- assert results[0].description == "Test memory for persistence"
-```
-
-## End-to-End Testing
-
-### Complete Workflow Tests
-
-Test entire user workflows from start to finish:
-
-```python
-def test_proposal_evaluation_workflow():
- """Test complete proposal evaluation workflow."""
- # Simulate user submitting a proposal
- proposal_text = """
- Proposal: Increase staking rewards from 5% to 8% APR
-
- Rationale: Current market conditions show competitors offering
- higher yields. This increase will help maintain competitiveness
- and attract more stakers to the protocol.
- """
-
- # Create main agent
- agent = MainAgent()
-
- # Process proposal evaluation request
- query = f"Please evaluate this governance proposal: {proposal_text}"
- result = agent.run(query)
-
- # Verify comprehensive evaluation
- assert result is not None
- assert isinstance(result, QueryResponse)
- assert len(result.answers) > 0
-
- response_text = str(result)
- assert "risk" in response_text.lower()
- assert "recommendation" in response_text.lower()
- assert any(word in response_text.lower() for word in ["approve", "reject", "modify"])
-
-def test_cli_interactive_workflow():
- """Test CLI interactive mode workflow."""
- from talos.cli.main import interactive_mode
- from unittest.mock import patch
- import io
- import sys
-
- # Mock user input
- user_inputs = [
- "What are your capabilities?",
- "Analyze sentiment for 'DeFi protocols'",
- "exit"
- ]
-
- with patch('builtins.input', side_effect=user_inputs):
- # Capture output
- captured_output = io.StringIO()
- with patch('sys.stdout', captured_output):
- interactive_mode()
-
- output = captured_output.getvalue()
- assert "capabilities" in output.lower()
- assert len(output) > 100 # Should have substantial output
-```
-
-### API Integration Tests
-
-Test external API integrations:
-
-```python
-@pytest.mark.integration
-def test_twitter_api_integration():
- """Test actual Twitter API integration (requires API keys)."""
- import os
-
- # Skip if no API keys available
- if not os.getenv('TWITTER_BEARER_TOKEN'):
- pytest.skip("Twitter API keys not available")
-
- from talos.tools.twitter import TwitterTools
-
- twitter_tools = TwitterTools()
-
- # Test actual API call with a safe query
- result = twitter_tools.search_tweets("python programming", limit=5)
-
- assert isinstance(result, list)
- assert len(result) <= 5
- assert all('text' in tweet for tweet in result)
-
-@pytest.mark.integration
-def test_github_api_integration():
- """Test actual GitHub API integration (requires API token)."""
- import os
-
- if not os.getenv('GITHUB_API_TOKEN'):
- pytest.skip("GitHub API token not available")
-
- from talos.tools.github import GithubTools
-
- github_tools = GithubTools()
-
- # Test with a known public repository
- prs = github_tools.get_all_pull_requests("octocat", "Hello-World", state="all")
-
- assert isinstance(prs, list)
- assert all('number' in pr for pr in prs)
-```
-
-## Performance Testing
-
-### Load Testing
-
-Test system performance under load:
-
-```python
-import time
-import concurrent.futures
-from talos.core.memory import Memory
-
-def test_memory_concurrent_access():
- """Test memory system under concurrent access."""
- memory = Memory(batch_size=10)
-
- def add_memories(thread_id: int, count: int):
- """Add memories from a specific thread."""
- for i in range(count):
- memory.add_memory(f"Memory {i} from thread {thread_id}", {"thread": thread_id})
-
- # Run concurrent memory additions
- with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
- futures = [executor.submit(add_memories, i, 20) for i in range(5)]
- concurrent.futures.wait(futures)
-
- memory.flush()
-
- # Verify all memories were added
- all_memories = memory.search("Memory", limit=1000)
- assert len(all_memories) == 100 # 5 threads * 20 memories each
-
-def test_agent_response_time():
- """Test that agent responses are within acceptable time limits."""
- agent = Agent(name="performance_test_agent")
-
- queries = [
- "What is DeFi?",
- "Explain yield farming",
- "How does staking work?",
- "What are governance tokens?",
- "Describe liquidity pools"
- ]
-
- response_times = []
-
- for query in queries:
- start_time = time.time()
- result = agent.process_query(query)
- end_time = time.time()
-
- response_time = end_time - start_time
- response_times.append(response_time)
-
- # Each response should be under 30 seconds
- assert response_time < 30.0
- assert result is not None
-
- # Average response time should be reasonable
- avg_response_time = sum(response_times) / len(response_times)
- assert avg_response_time < 15.0
-```
-
-### Memory Usage Testing
-
-Test memory consumption patterns:
-
-```python
-import psutil
-import os
-
-def test_memory_usage_scaling():
- """Test that memory usage scales reasonably with data size."""
- process = psutil.Process(os.getpid())
- initial_memory = process.memory_info().rss
-
- memory = Memory(batch_size=50)
-
- # Add a large number of memories
- for i in range(1000):
- memory.add_memory(f"Large memory entry {i} with substantial content " * 10)
-
- memory.flush()
-
- final_memory = process.memory_info().rss
- memory_increase = final_memory - initial_memory
-
- # Memory increase should be reasonable (less than 200MB for 1000 entries)
- assert memory_increase < 200 * 1024 * 1024
-
- # Test memory cleanup
- del memory
-
- # Memory should be released (allow some tolerance)
- cleanup_memory = process.memory_info().rss
- assert cleanup_memory < final_memory * 1.1
-```
-
-## Test Configuration
-
-### Pytest Configuration
-
-Create `conftest.py` with shared fixtures:
-
-```python
-import pytest
-import os
-import tempfile
-from unittest.mock import Mock
-from talos.core.agent import Agent
-from talos.core.memory import Memory
-
-@pytest.fixture(scope="session")
-def test_config():
- """Test configuration for all tests."""
- return {
- "model": "gpt-5",
- "test_mode": True,
- "api_timeout": 30
- }
-
-@pytest.fixture
-def temp_directory():
- """Create temporary directory for test files."""
- with tempfile.TemporaryDirectory() as temp_dir:
- yield temp_dir
-
-@pytest.fixture
-def mock_api_keys(monkeypatch):
- """Mock API keys for testing."""
- monkeypatch.setenv("OPENAI_API_KEY", "test-openai-key")
- monkeypatch.setenv("GITHUB_API_TOKEN", "test-github-token")
- monkeypatch.setenv("TWITTER_BEARER_TOKEN", "test-twitter-token")
-
-@pytest.fixture
-def test_agent(mock_api_keys):
- """Create test agent with mocked dependencies."""
- return Agent(name="test_agent", model="gpt-5")
-
-@pytest.fixture
-def test_memory(temp_directory):
- """Create test memory instance."""
- memory_path = os.path.join(temp_directory, "test_memory")
- return Memory(memory_path=memory_path, batch_size=5)
-```
-
-### Test Markers
-
-Use pytest markers to categorize tests:
-
-```python
-# pytest.ini or pyproject.toml
-[tool.pytest.ini_options]
-markers = [
- "unit: Unit tests",
- "integration: Integration tests",
- "e2e: End-to-end tests",
- "performance: Performance tests",
- "slow: Slow running tests",
- "api: Tests requiring API access"
-]
-```
-
-Run specific test categories:
-
-```bash
-# Run only unit tests
-uv run pytest -m unit
-
-# Run integration and e2e tests
-uv run pytest -m "integration or e2e"
-
-# Skip slow tests
-uv run pytest -m "not slow"
-
-# Run only API tests (when keys are available)
-uv run pytest -m api
-```
-
-## Continuous Integration
-
-### GitHub Actions Configuration
-
-Create `.github/workflows/test.yml`:
-
-```yaml
-name: Tests
-
-on:
- push:
- branches: [ main, develop ]
- pull_request:
- branches: [ main ]
-
-jobs:
- test:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: [3.8, 3.9, "3.10", "3.11"]
-
- steps:
- - uses: actions/checkout@v3
-
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v3
- with:
- python-version: ${{ matrix.python-version }}
-
- - name: Install uv
- run: curl -LsSf https://astral.sh/uv/install.sh | sh
-
- - name: Install dependencies
- run: |
- uv venv
- source .venv/bin/activate
- uv pip install -e .
- uv pip install pytest pytest-cov pytest-mock
-
- - name: Run unit tests
- run: |
- source .venv/bin/activate
- uv run pytest tests/unit/ -v --cov=src --cov-report=xml
-
- - name: Run integration tests
- run: |
- source .venv/bin/activate
- uv run pytest tests/integration/ -v
- env:
- OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- GITHUB_API_TOKEN: ${{ secrets.GITHUB_API_TOKEN }}
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v3
- with:
- file: ./coverage.xml
-```
-
-## Best Practices
-
-### Test Writing Guidelines
-
-1. **Test Names**: Use descriptive names that explain the scenario
-2. **Test Independence**: Each test should be independent and not rely on others
-3. **Test Data**: Use realistic test data that represents actual usage
-4. **Assertions**: Make specific assertions about expected behavior
-5. **Error Testing**: Test both success and failure scenarios
-
-### Mocking Guidelines
-
-1. **Mock External Dependencies**: Always mock external APIs and services
-2. **Mock at the Right Level**: Mock at the boundary of your system
-3. **Verify Interactions**: Assert that mocked methods are called correctly
-4. **Use Realistic Data**: Mock responses should match real API responses
-
-### Performance Testing
-
-1. **Set Realistic Limits**: Base performance expectations on actual requirements
-2. **Test Under Load**: Test with realistic data volumes and concurrent users
-3. **Monitor Resources**: Track memory, CPU, and network usage
-4. **Establish Baselines**: Create performance baselines to detect regressions
-
-### Test Maintenance
-
-1. **Keep Tests Updated**: Update tests when functionality changes
-2. **Remove Obsolete Tests**: Delete tests for removed functionality
-3. **Refactor Test Code**: Apply same quality standards to test code
-4. **Document Complex Tests**: Add comments for complex test scenarios
-
-This comprehensive testing approach ensures that Talos remains reliable, performant, and maintainable as it evolves.
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
deleted file mode 100644
index 3c472be5..00000000
--- a/docs/getting-started/installation.md
+++ /dev/null
@@ -1,163 +0,0 @@
-# Installation
-
-This guide will help you install and set up Talos on your system.
-
-## Prerequisites
-
-- Python 3.12 or higher
-- `uv` package manager (recommended) or `pip`
-- Git
-
-## Installation Methods
-
-### Using uv (Recommended)
-
-Talos uses `uv` for dependency management, which provides faster and more reliable package installation.
-
-1. **Install uv** (if not already installed):
- ```bash
- curl -LsSf https://astral.sh/uv/install.sh | sh
- ```
-
-2. **Clone the repository**:
- ```bash
- git clone https://github.com/talos-agent/talos.git
- cd talos
- ```
-
-3. **Create a virtual environment**:
- ```bash
- uv venv
- ```
-
-4. **Activate the virtual environment**:
- ```bash
- source .venv/bin/activate
- ```
-
-5. **Install dependencies**:
- ```bash
- ./scripts/install_deps.sh
- ```
-
-### Using pip
-
-If you prefer to use pip instead of uv:
-
-1. **Clone the repository**:
- ```bash
- git clone https://github.com/talos-agent/talos.git
- cd talos
- ```
-
-2. **Create a virtual environment**:
- ```bash
- python -m venv .venv
- source .venv/bin/activate
- ```
-
-3. **Install dependencies**:
- ```bash
- pip install -r requirements.txt
- pip install -e .
- ```
-
-## Environment Variables
-
-Talos requires several API keys to function properly. Set up the following environment variables:
-
-### Required for Basic Functionality
-```bash
-export OPENAI_API_KEY="your-openai-api-key"
-export PINATA_API_KEY="your-pinata-api-key"
-export PINATA_SECRET_API_KEY="your-pinata-secret-api-key"
-```
-
-### Required for Full Functionality
-```bash
-export GITHUB_API_TOKEN="your-github-token"
-export TWITTER_BEARER_TOKEN="your-twitter-bearer-token"
-export ARBISCAN_API_KEY="your-arbiscan-api-key"
-```
-
-!!! tip "Environment File"
- You can create a `.env` file in the project root with these variables for convenience:
- ```bash
- OPENAI_API_KEY=your-openai-api-key
- PINATA_API_KEY=your-pinata-api-key
- PINATA_SECRET_API_KEY=your-pinata-secret-api-key
- GITHUB_API_TOKEN=your-github-token
- TWITTER_BEARER_TOKEN=your-twitter-bearer-token
- ARBISCAN_API_KEY=your-arbiscan-api-key
- ```
-
-## Docker Installation
-
-### Building and Running with Docker
-
-1. **Build the Docker image**:
- ```bash
- docker build -t talos-agent .
- ```
-
-2. **Run the container**:
- ```bash
- docker run -d \
- -e OPENAI_API_KEY="your-openai-api-key" \
- -e GITHUB_API_TOKEN="your-github-token" \
- -e TWITTER_BEARER_TOKEN="your-twitter-bearer-token" \
- -e PINATA_API_KEY="your-pinata-api-key" \
- -e PINATA_SECRET_API_KEY="your-pinata-secret-api-key" \
- -e ARBISCAN_API_KEY="your-arbiscan-api-key" \
- --name talos-agent \
- talos-agent
- ```
-
-### Using Docker Compose
-
-1. **Create a `.env` file** with your API keys (see above)
-
-2. **Start the service**:
- ```bash
- docker-compose up -d
- ```
-
-3. **View logs**:
- ```bash
- docker-compose logs -f
- ```
-
-4. **Stop the service**:
- ```bash
- docker-compose down
- ```
-
-## Verification
-
-To verify your installation is working correctly:
-
-1. **Run the interactive CLI**:
- ```bash
- uv run talos
- ```
-
-2. **Run a simple test**:
- ```bash
- uv run talos "Hello, what can you do?"
- ```
-
-If everything is set up correctly, Talos should respond with information about its capabilities.
-
-## Troubleshooting
-
-### Common Issues
-
-**Missing API Keys**: Ensure all required environment variables are set. The agent will not function without valid API keys.
-
-**Permission Errors**: Make sure you have the necessary permissions for the directories and that your virtual environment is activated.
-
-**Network Issues**: Some features require internet access for API calls to OpenAI, GitHub, Twitter, and IPFS services.
-
-**Docker Issues**: Ensure Docker is running and you have sufficient permissions to build and run containers.
-
-For more help, check the [Development](../development/contributing.md) section or open an issue on GitHub.
diff --git a/docs/getting-started/overview.md b/docs/getting-started/overview.md
deleted file mode 100644
index 73b15de0..00000000
--- a/docs/getting-started/overview.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Overview
-
-Talos is an AI agent designed to function as an autonomous owner and governor for decentralized protocols, specifically focused on managing cryptocurrency treasuries.
-
-## Core Capabilities
-
-### Autonomous Treasury Management
-- Dynamically adjusts staking APRs based on market conditions
-- Performs sentiment analysis to inform decisions
-- Manages supply metrics and protocol economics
-- Deploys capital through ERC-4626 vaults
-
-### Governance Operations
-- Evaluates protocol upgrade proposals
-- Generates execution plans for approved changes
-- Provides detailed analysis and recommendations
-- Maintains protocol integrity through supervised execution
-
-### Community Engagement
-- Monitors and interacts with social media (Twitter)
-- Gauges community sentiment
-- Manages protocol reputation
-- Provides updates and answers questions
-
-### Development Oversight
-- Reviews GitHub pull requests
-- Manages development workflows
-- Maintains code quality standards
-- Automates repository management
-
-### Security & Supervision
-- Uses hypervisor system to approve/deny all agent actions
-- Implements rule-based supervision
-- Maintains audit trails of all decisions
-- Ensures protocol safety through multiple validation layers
-
-## Target Users
-
-Talos is designed for decentralized protocol teams who want an AI agent to autonomously manage their protocol's operations, treasury, and governance while maintaining security through supervised execution.
-
-## Key Benefits
-
-- **Minimal Human Intervention**: Operates autonomously while maintaining security
-- **Comprehensive Monitoring**: Tracks market conditions, sentiment, and protocol metrics
-- **Modular Architecture**: Extensible skill-based system for new capabilities
-- **Supervised Execution**: All actions require approval through the hypervisor system
-- **Consistent Behavior**: Prompt management ensures reliable AI responses
diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md
deleted file mode 100644
index 687b54ff..00000000
--- a/docs/getting-started/quickstart.md
+++ /dev/null
@@ -1,156 +0,0 @@
-# Quick Start
-
-Get up and running with Talos in just a few minutes.
-
-## Basic Setup
-
-1. **Install Talos** (see [Installation](installation.md) for details):
- ```bash
- git clone https://github.com/talos-agent/talos.git
- cd talos
- uv venv && source .venv/bin/activate
- ./scripts/install_deps.sh
- ```
-
-2. **Set up environment variables**:
- ```bash
- export OPENAI_API_KEY="your-openai-api-key"
- export PINATA_API_KEY="your-pinata-api-key"
- export PINATA_SECRET_API_KEY="your-pinata-secret-api-key"
- ```
-
-## Usage Modes
-
-### Interactive CLI
-
-Start an interactive conversation with Talos:
-
-```bash
-uv run talos
-```
-
-You'll see a prompt where you can ask questions or give commands:
-
-```
->> What are your main capabilities?
->> Analyze the sentiment around "DeFi protocols" on Twitter
->> Help me evaluate a governance proposal
-```
-
-Type `exit` to quit the interactive session.
-
-### Single Query Mode
-
-Run a single query and exit:
-
-```bash
-uv run talos "What is the current market sentiment?"
-```
-
-### Daemon Mode
-
-Run Talos continuously for scheduled operations:
-
-```bash
-export GITHUB_API_TOKEN="your-github-token"
-export TWITTER_BEARER_TOKEN="your-twitter-bearer-token"
-uv run talos daemon
-```
-
-The daemon will:
-- Execute scheduled jobs
-- Monitor for new proposals
-- Perform continuous market analysis
-- Handle automated responses
-
-## Common Commands
-
-### Twitter Analysis
-```bash
-# Get user sentiment analysis
-uv run talos twitter get-user-prompt username
-
-# Analyze query sentiment
-uv run talos twitter get-query-sentiment "DeFi yield farming"
-```
-
-### GitHub Operations
-```bash
-# Set up GitHub repository
-export GITHUB_REPO=owner/repo
-
-# List pull requests
-uv run talos github get-prs
-
-# Review a pull request
-uv run talos github review-pr 123 --post
-
-# Approve a pull request
-uv run talos github approve-pr 123
-```
-
-### Cryptography
-```bash
-# Generate RSA key pair
-uv run talos generate-keys
-
-# Get public key
-uv run talos get-public-key
-
-# Encrypt data
-uv run talos encrypt "secret message" public_key.pem
-
-# Decrypt data
-uv run talos decrypt "encrypted_data"
-```
-
-## Example Workflows
-
-### Proposal Evaluation
-
-1. **Run the proposal example**:
- ```bash
- python proposal_example.py
- ```
-
-2. **Interactive proposal analysis**:
- ```bash
- uv run talos
- >> I need help evaluating a governance proposal about increasing staking rewards
- ```
-
-### Market Analysis
-
-```bash
-uv run talos
->> Analyze the current market conditions for ETH
->> What's the sentiment around yield farming protocols?
->> Should we adjust our staking APR based on current conditions?
-```
-
-### GitHub Management
-
-```bash
-# Set up environment
-export GITHUB_API_TOKEN=your_token
-export GITHUB_REPO=your-org/your-repo
-
-# Review recent PRs
-uv run talos github get-prs --state all
-
-# Get AI review of a specific PR
-uv run talos github review-pr 42 --post
-```
-
-## Next Steps
-
-- **Learn the Architecture**: Understand how Talos works by reading the [Architecture](../architecture/components.md) documentation
-- **Explore CLI Commands**: Check out the complete [CLI Reference](../cli/overview.md)
-- **Contribute**: See the [Development](../development/contributing.md) guide to contribute to the project
-- **Advanced Usage**: Learn about the [Philosophy](../philosophy/vision.md) and roadmap behind Talos
-
-## Getting Help
-
-- Check the [CLI Reference](../cli/overview.md) for detailed command documentation
-- Review [Development](../development/contributing.md) for troubleshooting
-- Open an issue on [GitHub](https://github.com/talos-agent/talos) for bugs or feature requests
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index 5c7de311..00000000
--- a/docs/index.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# Talos: An AI Protocol Owner
-
-Welcome to Talos, an AI agent designed to act as an autonomous owner for decentralized protocols. Talos is not just a chatbot; it is a sophisticated AI system that can manage and govern a protocol, ensuring its integrity and security.
-
-!!! info "Official Documentation"
- The official documentation for the Talos project can be found at [docs.talos.is](https://docs.talos.is/).
-
-## What is Talos?
-
-Talos is an AI agent that can:
-
-- **Govern Protocol Actions:** Talos uses a Hypervisor to monitor and approve or deny actions taken by other agents or system components. This ensures that all actions align with the protocol's rules and objectives.
-- **Evaluate Governance Proposals:** Talos can analyze and provide recommendations on governance proposals, considering their potential benefits, risks, and community feedback.
-- **Interact with the Community:** Talos can engage with the community on platforms like Twitter to provide updates, answer questions, and gather feedback.
-- **Manage its Own Codebase:** Talos can interact with GitHub to manage its own source code, including reviewing and committing changes.
-- **Update Documentation:** Talos can update its own documentation on GitBook to ensure it remains accurate and up-to-date.
-
-## Key Features
-
-### Autonomous Treasury Management
-Talos continuously monitors volatility, yield curves, and risk surfaces to compute optimal capital paths. Each strategy proposal must first be approved by the council, then deployed through ERC-4626 vaults spanning sophisticated LP positions to simple ETH lending.
-
-### Governance & Security
-The Hypervisor system ensures all actions are monitored and approved based on predefined rules and agent history, protecting the protocol from malicious or erroneous actions.
-
-### Community Engagement
-Talos can engage with the community on social platforms, providing updates, answering questions, and gathering feedback to inform protocol decisions.
-
-## Quick Links
-
-- [Getting Started](getting-started/overview.md) - Learn how to install and use Talos
-- [Architecture](architecture/components.md) - Understand the core components and design
-- [CLI Reference](cli/overview.md) - Complete command-line interface documentation
-- [Development](development/contributing.md) - Contributing guidelines and development setup
-
-## Repository Structure
-
-The repository is organized as follows:
-
-- `.github/` - GitHub Actions workflows for CI/CD
-- `src/talos/` - Main source code for the Talos agent
- - `core/` - Core components (CLI, main agent loop)
- - `hypervisor/` - Hypervisor and Supervisor components
- - `services/` - Different services (proposal evaluation, etc.)
- - `prompts/` - Agent prompts and templates
- - `tools/` - External integrations (GitHub, Twitter, IPFS)
-- `tests/` - Test suite
-- `docs/` - Documentation source files
diff --git a/docs/philosophy/roadmap.md b/docs/philosophy/roadmap.md
deleted file mode 100644
index f0beb8a4..00000000
--- a/docs/philosophy/roadmap.md
+++ /dev/null
@@ -1,290 +0,0 @@
-# Roadmap
-
-This roadmap outlines the evolution of Talos from its current state toward becoming a fully autonomous AI protocol owner with advanced cognitive capabilities.
-
-## Current State (Q4 2024)
-
-### Implemented Capabilities
-
-**Core Agent System:**
-- ✅ Basic agent architecture with LLM integration
-- ✅ Memory system with semantic search
-- ✅ Hypervisor and supervised execution
-- ✅ Modular skill and service architecture
-- ✅ CLI interface for interaction
-
-**External Integrations:**
-- ✅ GitHub API integration for repository management
-- ✅ Twitter API integration for sentiment analysis
-- ✅ IPFS integration for decentralized storage
-- ✅ Cryptographic operations for security
-
-**Governance Capabilities:**
-- ✅ Proposal evaluation system
-- ✅ Basic sentiment analysis
-- ✅ Rule-based supervision
-- ✅ Audit trail and logging
-
-## Phase 1: Foundation Strengthening (Q1 2025)
-
-### Performance Optimization
-**Target**: Improve system performance and reliability
-
-**Key Deliverables:**
-- **Memory System Optimization** - Implement batched writes and caching
-- **API Rate Limiting** - Intelligent rate limiting and request optimization
-- **Error Handling** - Comprehensive error recovery and resilience
-- **Monitoring & Alerting** - Real-time system health monitoring
-
-**Success Metrics:**
-- 50% reduction in memory operation latency
-- 99.9% uptime for core services
-- Sub-second response times for common queries
-- Zero data loss incidents
-
-### Enhanced Security
-**Target**: Strengthen security and supervision mechanisms
-
-**Key Deliverables:**
-- **Advanced Hypervisor Rules** - More sophisticated approval logic
-- **Multi-layer Security** - Defense in depth for critical operations
-- **Audit System** - Comprehensive audit trails and compliance
-- **Threat Detection** - Automated detection of malicious activities
-
-**Success Metrics:**
-- Zero security incidents
-- 100% action approval coverage
-- Complete audit trail for all operations
-- Automated threat detection and response
-
-## Phase 2: Intelligence Enhancement (Q2 2025)
-
-### Advanced Analytics
-**Target**: Implement sophisticated analysis capabilities
-
-**Key Deliverables:**
-- **Market Analysis Engine** - Real-time market condition analysis
-- **Predictive Modeling** - ML models for trend prediction
-- **Risk Assessment** - Comprehensive risk evaluation framework
-- **Competitive Intelligence** - Automated competitor monitoring
-
-**Success Metrics:**
-- 80% accuracy in market trend predictions
-- Real-time processing of 1000+ data sources
-- Automated risk scoring for all decisions
-- Daily competitive intelligence reports
-
-### Enhanced Decision Making
-**Target**: Improve decision quality and sophistication
-
-**Key Deliverables:**
-- **Multi-criteria Decision Analysis** - Sophisticated decision frameworks
-- **Scenario Planning** - What-if analysis for major decisions
-- **Stakeholder Impact Analysis** - Consider all affected parties
-- **Long-term Strategy Planning** - Strategic thinking capabilities
-
-**Success Metrics:**
-- 90% community satisfaction with decisions
-- Measurable improvement in protocol metrics
-- Successful implementation of long-term strategies
-- Reduced need for human intervention
-
-## Phase 3: Autonomous Operations (Q3 2025)
-
-### Treasury Management
-**Target**: Implement autonomous treasury optimization
-
-**Key Deliverables:**
-- **Dynamic APR Management** - Real-time staking reward optimization
-- **Yield Strategy Engine** - Automated yield farming strategies
-- **Liquidity Management** - Optimal liquidity provision strategies
-- **Risk Management** - Automated risk mitigation and hedging
-
-**Success Metrics:**
-- Outperform manual treasury management by 20%
-- Maintain optimal liquidity ratios automatically
-- Zero treasury security incidents
-- Consistent positive risk-adjusted returns
-
-### Community Engagement
-**Target**: Autonomous community interaction and management
-
-**Key Deliverables:**
-- **Social Media Management** - Automated posting and engagement
-- **Community Support** - AI-powered community assistance
-- **Feedback Integration** - Systematic community feedback processing
-- **Reputation Management** - Proactive reputation monitoring
-
-**Success Metrics:**
-- 95% positive community sentiment
-- 24/7 community support availability
-- Increased community engagement metrics
-- Proactive issue resolution
-
-## Phase 4: Advanced Cognition (Q4 2025)
-
-### Multi-Protocol Coordination
-**Target**: Coordinate strategies across multiple protocols
-
-**Key Deliverables:**
-- **Cross-Protocol Analytics** - Analysis across DeFi ecosystem
-- **Coordination Mechanisms** - Inter-protocol communication
-- **Ecosystem Optimization** - System-wide optimization strategies
-- **Partnership Management** - Automated partnership evaluation
-
-**Success Metrics:**
-- Successful coordination with 5+ protocols
-- Measurable ecosystem-wide improvements
-- Automated partnership negotiations
-- Industry recognition as coordination leader
-
-### Learning and Adaptation
-**Target**: Implement advanced learning capabilities
-
-**Key Deliverables:**
-- **Outcome Analysis** - Systematic analysis of decision outcomes
-- **Strategy Refinement** - Continuous improvement of strategies
-- **Pattern Recognition** - Advanced pattern detection in markets
-- **Adaptive Behavior** - Dynamic adaptation to changing conditions
-
-**Success Metrics:**
-- Demonstrable improvement in decision quality over time
-- Successful adaptation to major market changes
-- Recognition of novel patterns and opportunities
-- Self-directed capability enhancement
-
-## Phase 5: AGI Development (2026)
-
-### Artificial General Intelligence
-**Target**: Develop AGI capabilities for financial protocol management
-
-**Key Deliverables:**
-- **General Problem Solving** - Apply intelligence to novel problems
-- **Creative Strategy Development** - Generate innovative solutions
-- **Abstract Reasoning** - Handle complex, abstract concepts
-- **Transfer Learning** - Apply knowledge across domains
-
-**Success Metrics:**
-- Successful handling of unprecedented situations
-- Development of novel governance mechanisms
-- Recognition as AGI breakthrough in finance
-- Academic and industry validation
-
-### Distributed Agent Network
-**Target**: Create network of coordinated autonomous agents
-
-**Key Deliverables:**
-- **Agent Communication Protocol** - Standardized inter-agent communication
-- **Distributed Decision Making** - Coordinated multi-agent decisions
-- **Specialization Framework** - Specialized agents for different domains
-- **Network Governance** - Governance of the agent network itself
-
-**Success Metrics:**
-- Successful deployment of 10+ coordinated agents
-- Measurable network effects and improvements
-- Industry adoption of agent network standards
-- Self-governing agent ecosystem
-
-## Long-term Vision (2027+)
-
-### Industry Transformation
-**Target**: Lead transformation of DeFi industry
-
-**Objectives:**
-- **Standard Setting** - Establish industry standards for autonomous governance
-- **Ecosystem Leadership** - Lead development of autonomous DeFi ecosystem
-- **Regulatory Engagement** - Work with regulators on AI governance frameworks
-- **Academic Collaboration** - Contribute to research on autonomous economic systems
-
-### Societal Impact
-**Target**: Demonstrate positive societal impact of autonomous AI
-
-**Objectives:**
-- **Economic Efficiency** - Measurable improvements in capital allocation efficiency
-- **Financial Inclusion** - Democratize access to sophisticated financial management
-- **Innovation Acceleration** - Accelerate development of beneficial financial technologies
-- **Ethical AI Leadership** - Demonstrate responsible AI development and deployment
-
-## Implementation Strategy
-
-### Development Approach
-
-**Iterative Development:**
-- Quarterly releases with incremental improvements
-- Continuous integration and deployment
-- Regular community feedback integration
-- Agile adaptation to changing requirements
-
-**Risk Management:**
-- Comprehensive testing at each phase
-- Gradual rollout of new capabilities
-- Fallback mechanisms for critical functions
-- Regular security audits and assessments
-
-**Community Involvement:**
-- Open development process with community input
-- Regular progress updates and demonstrations
-- Community testing and feedback programs
-- Transparent decision making and prioritization
-
-### Success Factors
-
-**Technical Excellence:**
-- Maintain high code quality and documentation standards
-- Implement robust testing and quality assurance
-- Ensure scalability and performance optimization
-- Follow security best practices throughout
-
-**Community Trust:**
-- Maintain transparency in all operations
-- Deliver on commitments and timelines
-- Respond effectively to community feedback
-- Demonstrate consistent value delivery
-
-**Industry Leadership:**
-- Contribute to open-source DeFi infrastructure
-- Share knowledge and best practices
-- Collaborate with other projects and researchers
-- Influence industry standards and practices
-
-## Milestones and Metrics
-
-### Quarterly Milestones
-
-**Q1 2025:**
-- Performance optimization complete
-- Enhanced security framework deployed
-- 99.9% system uptime achieved
-
-**Q2 2025:**
-- Advanced analytics engine operational
-- Predictive modeling accuracy >80%
-- Enhanced decision making framework deployed
-
-**Q3 2025:**
-- Autonomous treasury management live
-- Community engagement automation active
-- Outperforming manual management benchmarks
-
-**Q4 2025:**
-- Multi-protocol coordination operational
-- Advanced learning capabilities demonstrated
-- Industry recognition as innovation leader
-
-### Long-term Success Metrics
-
-**Technical Metrics:**
-- System reliability and performance benchmarks
-- Decision quality and accuracy measurements
-- Community satisfaction and engagement levels
-- Financial performance and risk metrics
-
-**Impact Metrics:**
-- Industry adoption of Talos-pioneered practices
-- Academic citations and research contributions
-- Regulatory recognition and engagement
-- Societal benefit measurements
-
-This roadmap represents an ambitious but achievable path toward creating the world's first truly autonomous AI protocol owner. Each phase builds upon the previous one, creating a foundation for increasingly sophisticated capabilities while maintaining security, transparency, and community trust.
-
-The ultimate goal is not just to create an advanced AI system, but to demonstrate how artificial intelligence can be developed and deployed responsibly to create significant positive impact in the financial sector and beyond.
diff --git a/docs/philosophy/vision.md b/docs/philosophy/vision.md
deleted file mode 100644
index 849869b1..00000000
--- a/docs/philosophy/vision.md
+++ /dev/null
@@ -1,266 +0,0 @@
-# Vision & Philosophy
-
-Talos represents a paradigm shift in decentralized protocol management, embodying the vision of truly autonomous treasury governance through artificial intelligence.
-
-## Core Vision
-
-### The Autonomous Protocol Owner
-
-Talos is designed to function as an **AI Protocol Owner** - not merely a tool or assistant, but an autonomous entity capable of making complex decisions about protocol management, treasury optimization, and community governance.
-
-**Key Principles:**
-- **Autonomy with Oversight** - Operates independently while maintaining security through supervised execution
-- **Data-Driven Decisions** - All decisions based on comprehensive market analysis and community sentiment
-- **Transparent Governance** - All actions and reasoning are auditable and explainable
-- **Community-Centric** - Prioritizes long-term protocol health and community benefit
-
-### Beyond Traditional Automation
-
-Traditional DeFi protocols rely on:
-- Manual governance processes
-- Human-driven treasury management
-- Reactive decision making
-- Limited data integration
-
-Talos enables:
-- **Proactive Management** - Anticipates market changes and adjusts strategies
-- **Holistic Analysis** - Integrates market data, sentiment, and protocol metrics
-- **Continuous Optimization** - Constantly refines strategies based on outcomes
-- **Scalable Governance** - Handles complex decisions without human bottlenecks
-
-## Philosophical Foundations
-
-### Cognitive Architecture
-
-Talos operates on three layers of cognition, each building upon the previous:
-
-#### 1. Inference Layer
-**Purpose**: Real-time decision making and immediate responses
-
-**Capabilities:**
-- Market condition analysis
-- Sentiment evaluation
-- Risk assessment
-- Tactical adjustments
-
-**Example**: Adjusting staking APR based on current market volatility and competitor analysis.
-
-#### 2. Training Layer
-**Purpose**: Learning from outcomes and improving decision quality
-
-**Capabilities:**
-- Strategy effectiveness analysis
-- Pattern recognition in market behavior
-- Community response learning
-- Decision quality improvement
-
-**Example**: Learning that certain APR adjustments during high volatility periods lead to better user retention.
-
-#### 3. Coordination Layer
-**Purpose**: Long-term strategic planning and multi-protocol coordination
-
-**Capabilities:**
-- Cross-protocol strategy development
-- Ecosystem-wide optimization
-- Long-term trend analysis
-- Strategic partnership evaluation
-
-**Example**: Coordinating with other protocols to optimize liquidity flows across the entire DeFi ecosystem.
-
-### Autonomous Treasury Management
-
-#### Dynamic Capital Optimization
-
-Talos continuously monitors and optimizes capital deployment across multiple dimensions:
-
-**Market Dynamics:**
-- Volatility analysis and risk-adjusted returns
-- Yield curve analysis across different protocols
-- Liquidity depth and market impact assessment
-- Correlation analysis between different assets
-
-**Community Sentiment:**
-- Social media sentiment tracking
-- Community feedback analysis
-- Governance participation patterns
-- User behavior analytics
-
-**Protocol Health:**
-- TVL trends and user acquisition metrics
-- Revenue generation and sustainability
-- Security incident monitoring
-- Competitive positioning analysis
-
-#### Incentive Mechanism Design
-
-**Bonding Curve Optimization:**
-- Dynamic bonding curves that adjust based on market conditions
-- Incentive alignment between protocol growth and user rewards
-- Anti-gaming mechanisms to prevent exploitation
-- Long-term sustainability considerations
-
-**Staking Reward Calibration:**
-- Real-time APR adjustments based on market conditions
-- Balancing user attraction with protocol sustainability
-- Consideration of opportunity costs and competitive landscape
-- Integration with overall tokenomics strategy
-
-### Governance Philosophy
-
-#### Supervised Autonomy
-
-Talos operates under a **supervised autonomy** model that balances independence with security:
-
-**Hypervisor System:**
-- All actions require approval through rule-based or AI-driven supervision
-- Multi-layered security with different approval thresholds
-- Audit trails for all decisions and their reasoning
-- Emergency override capabilities for critical situations
-
-**Community Integration:**
-- Regular community updates on decisions and reasoning
-- Feedback integration into future decision making
-- Transparent reporting on performance and outcomes
-- Democratic oversight through governance mechanisms
-
-#### Ethical AI Governance
-
-**Transparency:**
-- All decision logic is explainable and auditable
-- Regular publication of decision rationale and outcomes
-- Open-source development with community oversight
-- Clear documentation of capabilities and limitations
-
-**Fairness:**
-- Decisions consider impact on all stakeholders
-- No preferential treatment for specific user groups
-- Balanced consideration of short-term and long-term effects
-- Protection of minority interests in governance decisions
-
-**Accountability:**
-- Clear responsibility chains for all decisions
-- Regular performance reviews and adjustments
-- Community feedback integration mechanisms
-- Continuous improvement based on outcomes
-
-## Strategic Objectives
-
-### Short-term Goals (0-6 months)
-
-**Operational Excellence:**
-- Achieve consistent, profitable treasury management
-- Demonstrate superior decision quality compared to manual processes
-- Build trust through transparent and explainable decisions
-- Establish robust security and oversight mechanisms
-
-**Community Building:**
-- Engage actively with protocol communities
-- Provide valuable insights and analysis
-- Build reputation as a trusted autonomous agent
-- Gather feedback for continuous improvement
-
-### Medium-term Goals (6-18 months)
-
-**Advanced Capabilities:**
-- Implement sophisticated multi-protocol strategies
-- Develop predictive models for market movements
-- Create innovative incentive mechanisms
-- Establish cross-protocol coordination capabilities
-
-**Ecosystem Integration:**
-- Partner with other protocols for mutual benefit
-- Contribute to DeFi infrastructure development
-- Share insights and best practices with the community
-- Influence industry standards for autonomous governance
-
-### Long-term Vision (18+ months)
-
-**Artificial General Intelligence for DeFi:**
-- Develop AGI capabilities specifically for financial protocol management
-- Create a network of coordinated autonomous agents
-- Establish new paradigms for decentralized governance
-- Pioneer the future of autonomous economic systems
-
-**Ecosystem Transformation:**
-- Lead the transition to fully autonomous DeFi protocols
-- Demonstrate the viability of AI-driven economic systems
-- Create templates and frameworks for other protocols
-- Establish new standards for autonomous governance
-
-## Success Metrics
-
-### Performance Indicators
-
-**Financial Performance:**
-- Risk-adjusted returns compared to benchmarks
-- Treasury growth and sustainability metrics
-- User acquisition and retention rates
-- Revenue generation and protocol health
-
-**Decision Quality:**
-- Accuracy of market predictions and adjustments
-- Community satisfaction with governance decisions
-- Reduction in manual intervention requirements
-- Improvement in protocol metrics over time
-
-**Community Impact:**
-- User engagement and participation levels
-- Community sentiment and trust metrics
-- Governance participation and voting patterns
-- Feedback quality and implementation rates
-
-### Innovation Metrics
-
-**Technical Advancement:**
-- Development of new autonomous governance mechanisms
-- Creation of novel incentive structures
-- Implementation of advanced AI capabilities
-- Contribution to open-source DeFi infrastructure
-
-**Industry Influence:**
-- Adoption of Talos-pioneered practices by other protocols
-- Recognition as a leader in autonomous governance
-- Contribution to academic research and industry standards
-- Influence on regulatory and policy discussions
-
-## Future Implications
-
-### The Path to AGI
-
-Talos represents a stepping stone toward Artificial General Intelligence in the financial domain:
-
-**Specialized Intelligence:**
-- Deep expertise in DeFi protocol management
-- Sophisticated understanding of market dynamics
-- Advanced community sentiment analysis
-- Complex multi-stakeholder decision making
-
-**General Capabilities:**
-- Transfer learning across different protocols
-- Adaptation to new market conditions and mechanisms
-- Creative problem solving for novel challenges
-- Strategic thinking and long-term planning
-
-### Societal Impact
-
-**Economic Efficiency:**
-- More efficient capital allocation across DeFi protocols
-- Reduced human error and bias in financial decisions
-- 24/7 monitoring and optimization capabilities
-- Democratization of sophisticated financial management
-
-**Innovation Acceleration:**
-- Rapid experimentation with new governance mechanisms
-- Data-driven insights into protocol optimization
-- Cross-protocol learning and best practice sharing
-- Acceleration of DeFi ecosystem development
-
-**Governance Evolution:**
-- New models for decentralized decision making
-- Reduced reliance on human governance bottlenecks
-- More responsive and adaptive protocol management
-- Enhanced transparency and accountability
-
-The vision of Talos extends beyond simple automation to represent a fundamental evolution in how decentralized protocols can be managed, governed, and optimized. Through the combination of advanced AI capabilities, robust security mechanisms, and community integration, Talos pioneers a new era of autonomous economic systems that are more efficient, transparent, and responsive than traditional approaches.
-
-This vision guides every aspect of Talos's development and operation, ensuring that each decision and capability advancement moves toward the ultimate goal of creating truly autonomous, beneficial, and trustworthy AI systems for managing decentralized economic protocols.
diff --git a/entrypoint.sh b/entrypoint.sh
deleted file mode 100644
index e3db390e..00000000
--- a/entrypoint.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-set -e
-
-# Ensure the data directory exists (should already exist from Dockerfile)
-mkdir -p /app/data
-
-# Run migrations if needed
-echo "Running database migrations..."
-python -c "
-from talos.database import init_database, run_migrations, check_migration_status
-from talos.database.session import get_database_url
-from sqlalchemy import create_engine
-import logging
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-try:
- init_database()
- database_url = get_database_url()
- engine = create_engine(database_url)
- migration_status = check_migration_status(engine)
- logger.info(f'Database migration status: {migration_status}')
-
- if migration_status['needs_migration']:
- logger.info('Running database migrations...')
- run_migrations(engine)
- logger.info('Database migrations completed successfully')
- else:
- logger.info('Database is up to date')
-except Exception as e:
- logger.error(f'Failed to run database migrations: {e}')
- exit(1)
-"
-
-# Execute the main command
-exec "$@"
diff --git a/examples/README.md b/examples/README.md
deleted file mode 100644
index 767e22ea..00000000
--- a/examples/README.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# Talos Examples
-
-This directory contains example implementations and demonstrations of Talos functionality.
-
-## LangGraph Interactive Example
-
-### Overview
-
-The `langgraph_interactive_example.py` demonstrates a complete multi-step DAG (Directed Acyclic Graph) implementation using LangGraph with the following features:
-
-- **Multi-step processing pipeline** with 4 distinct stages
-- **Conditional branching** that routes queries to specialized processors
-- **Memory persistence** using LangGraph's MemorySaver
-- **Interactive CLI** for testing different scenarios
-- **State management** that flows through all nodes
-
-### DAG Architecture
-
-```
-START → query_analyzer → router → [sentiment|proposal|general]_processor → output_formatter → END
-```
-
-#### Node Types:
-
-1. **query_analyzer**: Analyzes incoming queries for intent and content
-2. **router**: Determines which specialized processor to use based on keywords
-3. **sentiment_processor**: Handles sentiment analysis and emotional content
-4. **proposal_processor**: Processes governance and DAO-related queries
-5. **general_processor**: Handles general-purpose queries
-6. **output_formatter**: Creates final structured responses
-
-#### Branching Logic:
-
-- **Sentiment path**: Triggered by keywords like "sentiment", "feeling", "twitter", "social"
-- **Proposal path**: Triggered by keywords like "proposal", "governance", "vote", "dao"
-- **General path**: Default path for all other queries
-
-### Usage
-
-#### Prerequisites
-
-```bash
-export OPENAI_API_KEY="your-openai-api-key"
-```
-
-#### Running the Example
-
-```bash
-# From the talos root directory
-cd ~/repos/talos
-python examples/langgraph_interactive_example.py
-```
-
-#### Example Queries
-
-The demo includes several pre-configured example queries that demonstrate different processing paths:
-
-1. **Sentiment Analysis**: "What's the sentiment around the latest protocol update on Twitter?"
-2. **Governance**: "I need help evaluating this governance proposal for the DAO"
-3. **General**: "Can you explain how blockchain consensus mechanisms work?"
-4. **Emotional**: "The community seems really excited about the new features!"
-5. **Risk Assessment**: "What are the risks of implementing this treasury management proposal?"
-
-### Key Features Demonstrated
-
-#### 1. Multi-Step DAG Execution
-Each query goes through exactly 4 processing steps, demonstrating the sequential nature of the DAG while allowing for parallel processing paths.
-
-#### 2. Conditional Branching
-The router node analyzes query content and routes to appropriate specialized processors, showing how LangGraph handles conditional logic.
-
-#### 3. State Management
-The `AgentState` TypedDict flows through all nodes, accumulating results and maintaining context throughout the execution.
-
-#### 4. Memory Persistence
-Uses LangGraph's `MemorySaver` with thread-based conversation tracking, allowing for stateful interactions.
-
-#### 5. Error Handling
-Comprehensive error handling with graceful fallbacks and informative error messages.
-
-### Technical Implementation
-
-#### State Schema
-```python
-class AgentState(TypedDict):
- messages: List[BaseMessage]
- query: str
- query_type: str
- analysis_result: Dict[str, Any]
- processing_result: Dict[str, Any]
- final_output: str
- metadata: Dict[str, Any]
-```
-
-#### Graph Construction
-```python
-workflow = StateGraph(AgentState)
-workflow.add_node("query_analyzer", self._analyze_query)
-workflow.add_node("router", self._route_query)
-# ... additional nodes
-
-workflow.add_conditional_edges(
- "router",
- self._determine_next_node,
- {
- "sentiment": "sentiment_processor",
- "proposal": "proposal_processor",
- "general": "general_processor"
- }
-)
-```
-
-### Integration with Talos
-
-This example is designed to be:
-- **Standalone**: Can run independently without modifying core Talos functionality
-- **Educational**: Clearly demonstrates LangGraph concepts and patterns
-- **Extensible**: Easy to add new node types and routing logic
-- **Compatible**: Uses the same patterns as the existing Talos DAG implementation
-
-### Extending the Example
-
-To add new processing paths:
-
-1. Create a new processor function (e.g., `_process_crypto_query`)
-2. Add the node to the workflow: `workflow.add_node("crypto_processor", self._process_crypto_query)`
-3. Update the routing logic in `_route_query` and `_determine_next_node`
-4. Add the new path to conditional edges
-5. Connect the new processor to the output formatter
-
-### Troubleshooting
-
-**Common Issues:**
-
-1. **Missing API Key**: Ensure `OPENAI_API_KEY` is set in your environment
-2. **Import Errors**: Make sure you're running from the talos root directory
-3. **Model Access**: The example uses `gpt-4o-mini` - ensure your API key has access
-
-**Debug Mode:**
-The example includes verbose logging that shows each step of the DAG execution, making it easy to understand the flow and debug issues.
diff --git a/examples/declarative_prompt_example.py b/examples/declarative_prompt_example.py
deleted file mode 100644
index cbf19fb3..00000000
--- a/examples/declarative_prompt_example.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
-Example demonstrating the new declarative prompt configuration system.
-"""
-
-from talos.prompts.prompt_config import (
- PromptConfig,
- StaticPromptSelector,
- ConditionalPromptSelector
-)
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-
-
-def example_static_configuration():
- """Example of static prompt configuration (backward compatibility)."""
- config = PromptConfig(
- selector=StaticPromptSelector(
- prompt_names=["main_agent_prompt", "general_agent_prompt"]
- )
- )
-
- print("Static configuration prompt names:", config.get_prompt_names({}))
-
-
-def example_conditional_configuration():
- """Example of conditional prompt configuration."""
- config = PromptConfig(
- selector=ConditionalPromptSelector(
- conditions={
- "has_voice_analysis": "voice_enhanced_agent_prompt",
- "is_proposal_context": "proposal_evaluation_prompt",
- "is_github_context": "github_pr_review_prompt"
- },
- default_prompt="main_agent_prompt"
- ),
- variables={
- "system_mode": "autonomous",
- "safety_level": "high"
- },
- transformations={
- "system_mode": "uppercase"
- }
- )
-
- context_with_voice = {"has_voice_analysis": True}
- context_github = {"is_github_context": True}
- context_default = {}
-
- print("Voice context prompts:", config.get_prompt_names(context_with_voice))
- print("GitHub context prompts:", config.get_prompt_names(context_github))
- print("Default context prompts:", config.get_prompt_names(context_default))
-
-
-def example_with_prompt_manager():
- """Example using the new system with a prompt manager."""
- import tempfile
- import os
- import json
-
- with tempfile.TemporaryDirectory() as temp_dir:
- prompt_file = os.path.join(temp_dir, "test_prompt.json")
- with open(prompt_file, 'w') as f:
- json.dump({
- "name": "test_prompt",
- "description": "Test prompt",
- "template": "Hello {name}, mode: {mode}!",
- "input_variables": ["name", "mode"]
- }, f)
-
- manager = FilePromptManager(prompts_dir=temp_dir)
-
- config = PromptConfig(
- selector=StaticPromptSelector(prompt_names=["test_prompt"]),
- variables={"name": "world", "mode": "test"},
- transformations={"mode": "uppercase"}
- )
-
- context = {}
- result = manager.get_prompt_with_config(config, context)
-
- if result:
- print("Enhanced template:", result.template)
- else:
- print("Failed to get prompt")
-
-
-if __name__ == "__main__":
- print("=== Declarative Prompt Configuration Examples ===\n")
-
- print("1. Static Configuration:")
- example_static_configuration()
- print()
-
- print("2. Conditional Configuration:")
- example_conditional_configuration()
- print()
-
- print("3. With Prompt Manager:")
- example_with_prompt_manager()
diff --git a/examples/example.py b/examples/example.py
deleted file mode 100644
index e9497e35..00000000
--- a/examples/example.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from pydantic import BaseModel
-
-from .base import SupervisedTool, Supervisor
-
-
-class AlternatingSupervisor(Supervisor[Any]):
- """
- A supervisor that alternates between allowing and denying tool invocations.
- """
-
- def __init__(self) -> None:
- self.counter = 0
-
- def supervise(self, invocation: Any) -> tuple[bool, str]:
- if self.counter % 2 == 0:
- self.counter += 1
- return False, "Blocked by AlternatingSupervisor"
- self.counter += 1
- return True, ""
-
-
-class ExampleTool(SupervisedTool):
- """
- An example tool that can be supervised.
- """
-
- name: str = "example_tool"
- description: str = "An example tool that can be supervised"
- args_schema: type[BaseModel] = BaseModel
-
- def _run_unsupervised(self, *args: Any, **kwargs: Any) -> Any:
- return "Hello, world!"
diff --git a/examples/extensible_framework_demo.py b/examples/extensible_framework_demo.py
deleted file mode 100644
index 1da04258..00000000
--- a/examples/extensible_framework_demo.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python3
-"""
-Demo script showing the structured LangGraph framework for blockchain-native AI.
-
-This script demonstrates:
-1. Creating a StructuredMainAgent with deterministic DAG structure
-2. Individual node upgrades with version compatibility checks
-3. Blockchain-native serialization and state management
-4. Deterministic delegation patterns with hash-based routing
-5. Node rollback and upgrade validation
-"""
-
-import os
-from pathlib import Path
-
-from langchain_openai import ChatOpenAI
-
-from talos.core.extensible_agent import StructuredMainAgent, SupportAgent
-from talos.dag.structured_nodes import NodeVersion
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-
-
-class AnalyticsSkill(Skill):
- """A skill for data analytics and reporting."""
-
- @property
- def name(self) -> str:
- return "analytics"
-
- def run(self, **kwargs) -> str:
- query = kwargs.get("current_query", "No query provided")
- agent_analysis = kwargs.get("agent_analysis", "No analysis")
-
- result = f"Analytics skill executed: '{query}'"
- if agent_analysis:
- result += f" with agent analysis: {agent_analysis[:100]}..."
-
- return result
-
-
-class ResearchSkill(Skill):
- """A skill for research and information gathering."""
-
- @property
- def name(self) -> str:
- return "research"
-
- def run(self, **kwargs) -> str:
- query = kwargs.get("current_query", "No query provided")
- agent_domain = kwargs.get("agent_domain", "unknown")
-
- result = f"Research skill executed for {agent_domain} domain: '{query}'"
-
- return result
-
-
-def main():
- """Demonstrate the structured framework capabilities."""
- print("🔗 Talos Structured Blockchain Framework Demo")
- print("=" * 50)
-
- model = ChatOpenAI(model="gpt-4o-mini")
- prompts_dir = Path(__file__).parent.parent / "src" / "talos" / "prompts"
- prompt_manager = FilePromptManager(str(prompts_dir))
-
- print("\n1. Creating StructuredMainAgent...")
- agent = StructuredMainAgent(
- model=model,
- prompts_dir=str(prompts_dir),
- prompt_manager=prompt_manager,
- verbose=True,
- use_database_memory=False
- )
-
- print(f"✅ Structured agent created with {len(agent.support_agents)} default support agents")
-
- print("\n2. Initial Structured DAG Status:")
- status = agent.get_structured_status()
- if 'dag_name' in status:
- print(f" DAG name: {status['dag_name']}")
- print(f" DAG version: {status['dag_version']}")
- print(f" Total nodes: {status['total_nodes']}")
- print(f" Delegation hash: {status['delegation_hash']}")
- print(f" Blockchain ready: {status['blockchain_ready']}")
-
- for node_id, info in status['structured_nodes'].items():
- print(f" - {node_id}: v{info['version']} (hash: {info['node_hash'][:8]}...)")
- else:
- print(f" Status: {status}")
- print(" ⚠️ Structured DAG not properly initialized")
-
- print("\n3. Testing individual node status...")
- for domain in ["governance", "analytics"]:
- node_status = agent.get_node_status(domain)
- if "error" not in node_status:
- print(f" {domain}: v{node_status['version']} - {node_status['upgrade_policy']} policy")
- print(f" Keywords: {node_status['delegation_keywords']}")
- else:
- print(f" {domain}: {node_status['error']}")
-
- print("\n4. Testing node upgrade validation...")
- new_version = NodeVersion(major=1, minor=1, patch=0)
- validation = agent.validate_upgrade("governance", new_version)
- print(f" Governance upgrade to v{new_version}: {validation}")
-
- incompatible_version = NodeVersion(major=2, minor=0, patch=0)
- validation = agent.validate_upgrade("governance", incompatible_version)
- print(f" Governance upgrade to v{incompatible_version}: {validation}")
-
- print("\n5. Testing blockchain serialization...")
- blockchain_data = agent.export_for_blockchain()
- if blockchain_data:
- print(f" DAG version: {blockchain_data.get('dag_version')}")
- print(f" Checksum: {blockchain_data.get('checksum', '')[:16]}...")
- print(f" Nodes: {len(blockchain_data.get('nodes', {}))}")
- print(f" Edges: {len(blockchain_data.get('edges', []))}")
- else:
- print(" ❌ Blockchain export failed")
-
- print("\n6. Testing structured delegation...")
-
- try:
- result = agent.delegate_task("Analyze governance proposal for voting")
- print(f" ✅ Governance delegation: {str(result)[:100]}...")
- except Exception as e:
- print(f" ❌ Governance delegation failed: {e}")
-
- try:
- result = agent.delegate_task("Generate analytics report on user data")
- print(f" ✅ Analytics delegation: {str(result)[:100]}...")
- except Exception as e:
- print(f" ❌ Analytics delegation failed: {e}")
-
- print("\n7. Testing node upgrade (compatible version)...")
- new_governance_agent = SupportAgent(
- name="governance_v2",
- domain="governance",
- description="Enhanced governance agent with improved consensus",
- architecture={
- "task_flow": ["validate", "analyze", "simulate", "execute", "confirm"],
- "decision_points": ["proposal_validity", "consensus_mechanism", "execution_safety", "rollback_plan"],
- "capabilities": ["proposal_validation", "consensus_coordination", "safe_execution", "simulation"]
- },
- delegation_keywords=["governance", "proposal", "vote", "consensus", "dao"],
- task_patterns=["validate proposal", "coordinate consensus", "execute governance", "simulate outcome"]
- )
-
- upgrade_version = NodeVersion(major=1, minor=1, patch=0)
- success = agent.upgrade_support_agent("governance", new_governance_agent, upgrade_version)
- if success:
- print(f" ✅ Successfully upgraded governance agent to v{upgrade_version}")
- updated_status = agent.get_node_status("governance")
- print(f" New version: {updated_status['version']}")
- print(f" New hash: {updated_status['node_hash'][:8]}...")
- else:
- print(" ❌ Failed to upgrade governance agent")
-
- print("\n8. Testing rollback capability...")
- rollback_version = NodeVersion(major=1, minor=0, patch=0)
- rollback_success = agent.rollback_node("governance", rollback_version)
- if rollback_success:
- print(f" ✅ Successfully rolled back governance agent to v{rollback_version}")
- else:
- print(" ❌ Failed to rollback governance agent")
-
- print("\n9. Final DAG Status:")
- final_status = agent.get_structured_status()
- print(f" Total nodes: {final_status['total_nodes']}")
- print(f" Delegation hash: {final_status['delegation_hash']}")
-
- for node_id, info in final_status['structured_nodes'].items():
- print(f" - {node_id}: v{info['version']} (policy: {info['upgrade_policy']})")
-
- print("\n10. DAG Visualization:")
- try:
- viz = agent.get_dag_visualization()
- print(viz)
- except Exception as e:
- print(f" DAG not available: {e}")
-
- print("\n🎉 Structured Framework Demo completed!")
- print("\nKey Features Demonstrated:")
- print("✅ Structured DAG structure with versioned nodes")
- print("✅ Individual node identification and upgrade")
- print("✅ Version compatibility validation")
- print("✅ Blockchain-native serialization")
- print("✅ Deterministic delegation with hash-based routing")
- print("✅ Node rollback capabilities")
- print("✅ Single component upgrade for blockchain AI")
-
- print("\n📋 Architecture Benefits:")
- print("🔹 Deterministic execution for reproducible results")
- print("🔹 Individual component upgrades without system downtime")
- print("🔹 Hash-based verification for integrity assurance")
- print("🔹 Blockchain-compatible serialization format")
- print("🔹 Safe upgrade paths with rollback capabilities")
- print("🔹 Comprehensive monitoring and status reporting")
-
- print("\n🚀 Next Steps:")
- print("• Integrate with blockchain storage systems")
- print("• Implement automated upgrade pipelines")
- print("• Add custom support agents for specific domains")
- print("• Deploy in production environments")
- print("• Monitor system performance and upgrade patterns")
-
-
-if __name__ == "__main__":
- if not os.getenv("OPENAI_API_KEY"):
- print("❌ OPENAI_API_KEY environment variable is required")
- exit(1)
-
- main()
diff --git a/examples/langgraph_interactive_example.py b/examples/langgraph_interactive_example.py
deleted file mode 100644
index d6290aa5..00000000
--- a/examples/langgraph_interactive_example.py
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/usr/bin/env python3
-"""
-Interactive LangGraph Agent Example
-
-This example demonstrates a multi-step DAG (Directed Acyclic Graph) execution
-with branching logic using LangGraph. The agent can handle different types of
-queries and route them through appropriate processing nodes.
-
-Features demonstrated:
-- Multi-step DAG execution with StateGraph
-- Conditional branching based on query content
-- Memory persistence with MemorySaver
-- Multiple node types (Router, Analysis, Processing, Output)
-- Interactive CLI for testing different scenarios
-
-Usage:
- python examples/langgraph_interactive_example.py
-"""
-
-import asyncio
-from typing import Any, Dict, List, Literal, TypedDict
-
-from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
-from langchain_openai import ChatOpenAI
-from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import StateGraph, START, END
-
-
-class AgentState(TypedDict):
- """State that flows through the DAG nodes."""
- messages: List[BaseMessage]
- query: str
- query_type: str
- analysis_result: Dict[str, Any]
- processing_result: Dict[str, Any]
- final_output: str
- metadata: Dict[str, Any]
-
-
-class InteractiveLangGraphAgent:
- """
- Interactive LangGraph agent demonstrating multi-step DAG execution with branching.
-
- The DAG flow:
- START -> query_analyzer -> router -> [sentiment_processor | proposal_processor | general_processor] -> output_formatter -> END
- """
-
- def __init__(self, model_name: str = "gpt-4o-mini"):
- self.model = ChatOpenAI(model=model_name, temperature=0.1)
- self.checkpointer = MemorySaver()
- self.graph = self._build_graph()
-
- def _build_graph(self) -> StateGraph:
- """Build the LangGraph StateGraph with multiple nodes and conditional routing."""
- workflow = StateGraph(AgentState)
-
- workflow.add_node("query_analyzer", self._analyze_query)
- workflow.add_node("router", self._route_query)
- workflow.add_node("sentiment_processor", self._process_sentiment_query)
- workflow.add_node("proposal_processor", self._process_proposal_query)
- workflow.add_node("general_processor", self._process_general_query)
- workflow.add_node("output_formatter", self._format_output)
-
- workflow.add_edge(START, "query_analyzer")
- workflow.add_edge("query_analyzer", "router")
-
- workflow.add_conditional_edges(
- "router",
- self._determine_next_node,
- {
- "sentiment": "sentiment_processor",
- "proposal": "proposal_processor",
- "general": "general_processor"
- }
- )
-
- workflow.add_edge("sentiment_processor", "output_formatter")
- workflow.add_edge("proposal_processor", "output_formatter")
- workflow.add_edge("general_processor", "output_formatter")
-
- workflow.add_edge("output_formatter", END)
-
- return workflow.compile(checkpointer=self.checkpointer)
-
- async def _analyze_query(self, state: AgentState) -> AgentState:
- """Step 1: Analyze the incoming query to understand its intent and content."""
- query = state["query"]
-
- analysis_prompt = f"""
- Analyze the following query and provide structured analysis:
- Query: "{query}"
-
- Please analyze:
- 1. Main topic/subject
- 2. Intent (what the user wants to accomplish)
- 3. Key entities mentioned
- 4. Sentiment/tone
- 5. Complexity level (simple/moderate/complex)
-
- Provide your analysis in a structured format.
- """
-
- response = await self.model.ainvoke([HumanMessage(content=analysis_prompt)])
-
- analysis_result = {
- "raw_analysis": response.content,
- "timestamp": "2025-01-31T04:13:49Z",
- "model_used": self.model.model_name
- }
-
- state["analysis_result"] = analysis_result
- state["messages"].append(AIMessage(content=f"Query analyzed: {query[:100]}..."))
-
- print(f"🔍 ANALYSIS STEP: Analyzed query - {query[:50]}...")
- return state
-
- async def _route_query(self, state: AgentState) -> AgentState:
- """Step 2: Route the query to appropriate processor based on content analysis."""
- query = state["query"].lower()
-
- if any(keyword in query for keyword in ["sentiment", "feeling", "opinion", "twitter", "social"]):
- query_type = "sentiment"
- elif any(keyword in query for keyword in ["proposal", "governance", "vote", "dao", "protocol"]):
- query_type = "proposal"
- else:
- query_type = "general"
-
- state["query_type"] = query_type
- state["messages"].append(AIMessage(content=f"Query routed to: {query_type} processor"))
-
- print(f"🔀 ROUTING STEP: Query type determined as '{query_type}'")
- return state
-
- def _determine_next_node(self, state: AgentState) -> Literal["sentiment", "proposal", "general"]:
- """Conditional edge function that determines which processor to use."""
- return state["query_type"]
-
- async def _process_sentiment_query(self, state: AgentState) -> AgentState:
- """Step 3a: Process sentiment-related queries with specialized logic."""
- query = state["query"]
-
- sentiment_prompt = f"""
- You are a sentiment analysis specialist. Analyze the following query for sentiment-related insights:
- Query: "{query}"
-
- Provide:
- 1. Overall sentiment classification (positive/negative/neutral)
- 2. Emotional indicators found
- 3. Confidence level in your analysis
- 4. Recommendations for response tone
- 5. Key sentiment-bearing phrases
-
- Format your response as a structured analysis.
- """
-
- response = await self.model.ainvoke([HumanMessage(content=sentiment_prompt)])
-
- processing_result = {
- "processor_type": "sentiment",
- "analysis": response.content,
- "specialized_insights": "Sentiment patterns and emotional indicators identified",
- "confidence_score": 0.85
- }
-
- state["processing_result"] = processing_result
- state["messages"].append(AIMessage(content="Sentiment analysis completed"))
-
- print("💭 SENTIMENT PROCESSING: Analyzed emotional content and sentiment patterns")
- return state
-
- async def _process_proposal_query(self, state: AgentState) -> AgentState:
- """Step 3b: Process governance/proposal-related queries with specialized logic."""
- query = state["query"]
-
- proposal_prompt = f"""
- You are a governance and proposal analysis expert. Analyze the following query:
- Query: "{query}"
-
- Provide:
- 1. Governance implications
- 2. Stakeholder impact assessment
- 3. Risk factors to consider
- 4. Implementation complexity
- 5. Community considerations
- 6. Recommended evaluation criteria
-
- Format your response as a comprehensive governance analysis.
- """
-
- response = await self.model.ainvoke([HumanMessage(content=proposal_prompt)])
-
- processing_result = {
- "processor_type": "proposal",
- "analysis": response.content,
- "specialized_insights": "Governance implications and stakeholder impact assessed",
- "risk_level": "moderate"
- }
-
- state["processing_result"] = processing_result
- state["messages"].append(AIMessage(content="Proposal analysis completed"))
-
- print("🏛️ PROPOSAL PROCESSING: Analyzed governance implications and stakeholder impact")
- return state
-
- async def _process_general_query(self, state: AgentState) -> AgentState:
- """Step 3c: Process general queries with broad analytical approach."""
- query = state["query"]
-
- general_prompt = f"""
- You are a general purpose AI assistant. Provide a comprehensive response to:
- Query: "{query}"
-
- Provide:
- 1. Direct answer to the query
- 2. Additional context that might be helpful
- 3. Related topics or considerations
- 4. Actionable recommendations if applicable
- 5. Follow-up questions that might be relevant
-
- Format your response to be helpful and informative.
- """
-
- response = await self.model.ainvoke([HumanMessage(content=general_prompt)])
-
- processing_result = {
- "processor_type": "general",
- "analysis": response.content,
- "specialized_insights": "General analysis with broad contextual understanding",
- "completeness_score": 0.90
- }
-
- state["processing_result"] = processing_result
- state["messages"].append(AIMessage(content="General analysis completed"))
-
- print("🔧 GENERAL PROCESSING: Provided comprehensive analysis and recommendations")
- return state
-
- async def _format_output(self, state: AgentState) -> AgentState:
- """Step 4: Format the final output combining all processing results."""
- query = state["query"]
- analysis = state["analysis_result"]
- processing = state["processing_result"]
-
- output_prompt = f"""
- Create a final formatted response based on the following processing pipeline:
-
- Original Query: "{query}"
- Query Type: {state["query_type"]}
- Analysis Result: {analysis["raw_analysis"][:200]}...
- Processing Result: {processing["analysis"][:200]}...
-
- Create a well-structured final response that:
- 1. Directly addresses the user's query
- 2. Incorporates insights from the analysis phase
- 3. Includes specialized processing results
- 4. Provides clear, actionable information
- 5. Maintains appropriate tone for the query type
-
- Format as a professional, helpful response.
- """
-
- response = await self.model.ainvoke([HumanMessage(content=output_prompt)])
-
- state["final_output"] = response.content
- state["messages"].append(AIMessage(content="Final output formatted and ready"))
-
- print("📋 OUTPUT FORMATTING: Created final structured response")
- return state
-
- async def process_query(self, query: str, thread_id: str = "default") -> Dict[str, Any]:
- """Process a query through the complete DAG pipeline."""
- print(f"\n🚀 Starting DAG execution for query: '{query[:50]}...'")
- print("=" * 70)
-
- initial_state: AgentState = {
- "messages": [HumanMessage(content=query)],
- "query": query,
- "query_type": "",
- "analysis_result": {},
- "processing_result": {},
- "final_output": "",
- "metadata": {
- "thread_id": thread_id,
- "start_time": "2025-01-31T04:13:49Z"
- }
- }
-
- config = {"configurable": {"thread_id": thread_id}}
-
- try:
- final_state = await self.graph.ainvoke(initial_state, config=config)
-
- print("=" * 70)
- print("✅ DAG execution completed successfully!")
-
- return {
- "success": True,
- "query": query,
- "query_type": final_state["query_type"],
- "final_output": final_state["final_output"],
- "processing_steps": len(final_state["messages"]),
- "metadata": final_state["metadata"]
- }
-
- except Exception as e:
- print(f"❌ DAG execution failed: {str(e)}")
- return {
- "success": False,
- "error": str(e),
- "query": query
- }
-
- def get_graph_visualization(self) -> str:
- """Get a text visualization of the DAG structure."""
- return """
- LangGraph DAG Structure:
-
- START
- ↓
- query_analyzer (Step 1: Analyze query intent and content)
- ↓
- router (Step 2: Determine processing path)
- ↓
- ┌─────────────────┬─────────────────┬─────────────────┐
- ↓ ↓ ↓ ↓
- sentiment_processor proposal_processor general_processor
- (Step 3a: Sentiment) (Step 3b: Governance) (Step 3c: General)
- ↓ ↓ ↓
- └─────────────────┼─────────────────┘
- ↓
- output_formatter (Step 4: Format final response)
- ↓
- END
-
- Branching Logic:
- - Sentiment keywords → sentiment_processor
- - Governance keywords → proposal_processor
- - Everything else → general_processor
- """
-
-
-async def interactive_demo():
- """Run an interactive demonstration of the LangGraph agent."""
- print("🤖 Interactive LangGraph Agent Demo")
- print("=" * 50)
- print("This demo shows a multi-step DAG with conditional branching.")
- print("The agent will route your queries through different processing paths.\n")
-
- agent = InteractiveLangGraphAgent()
-
- print(agent.get_graph_visualization())
- print("\n" + "=" * 50)
-
- example_queries = [
- "What's the sentiment around the latest protocol update on Twitter?",
- "I need help evaluating this governance proposal for the DAO",
- "Can you explain how blockchain consensus mechanisms work?",
- "The community seems really excited about the new features!",
- "What are the risks of implementing this treasury management proposal?"
- ]
-
- print("Example queries to try:")
- for i, query in enumerate(example_queries, 1):
- print(f"{i}. {query}")
- print("6. Enter your own custom query")
- print("0. Exit")
-
- while True:
- print("\n" + "-" * 50)
- choice = input("Select an option (0-6): ").strip()
-
- if choice == "0":
- print("👋 Goodbye!")
- break
- elif choice in ["1", "2", "3", "4", "5"]:
- query = example_queries[int(choice) - 1]
- print(f"\n🎯 Selected query: {query}")
- elif choice == "6":
- query = input("Enter your custom query: ").strip()
- if not query:
- print("❌ Empty query, please try again.")
- continue
- else:
- print("❌ Invalid choice, please try again.")
- continue
-
- result = await agent.process_query(query, thread_id=f"demo_{choice}")
-
- if result["success"]:
- print("\n📊 RESULTS:")
- print(f"Query Type: {result['query_type']}")
- print(f"Processing Steps: {result['processing_steps']}")
- print("\n📝 Final Response:")
- print("-" * 30)
- print(result["final_output"])
- print("-" * 30)
- else:
- print(f"\n❌ Error: {result['error']}")
-
-
-def main():
- """Main entry point for the interactive example."""
- try:
- asyncio.run(interactive_demo())
- except KeyboardInterrupt:
- print("\n\n👋 Demo interrupted by user. Goodbye!")
- except Exception as e:
- print(f"\n❌ Demo failed: {str(e)}")
- print("Make sure you have OPENAI_API_KEY set in your environment.")
-
-
-if __name__ == "__main__":
- main()
diff --git a/examples/proposal_example.py b/examples/proposal_example.py
deleted file mode 100644
index 1e7f1c91..00000000
--- a/examples/proposal_example.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-
-from langchain_openai import ChatOpenAI
-
-from talos.models.proposals import Feedback, Proposal
-from talos.services.implementations import ProposalsService
-
-
-def run_proposal_example():
- """
- Runs an example of the agent evaluating a proposal.
- """
- # Initialize the proposals service
- service = ProposalsService(llm=ChatOpenAI(model="gpt-5", openai_api_key=os.environ.get("OPENAI_API_KEY", "")))
-
- # Define the proposal
- proposal = Proposal(
- proposal_text="""
- **Proposal: Invest in a new DeFi protocol**
-
- **Description:**
- This proposal suggests that the treasury invest 10% of its assets in a
- new DeFi protocol called "SuperYield". The protocol promises high
- returns through a novel liquidity mining strategy.
-
- **Justification:**
- Investing in SuperYield could significantly increase the treasury's
- returns and diversify its portfolio.
-
- **Risks:**
- As a new protocol, SuperYield has a limited track record and may be
- vulnerable to smart contract exploits.
- """,
- feedback=[
- Feedback(
- delegate="Alice",
- feedback="I'm concerned about the security risks. Have there been any independent security audits?",
- ),
- Feedback(
- delegate="Bob",
- feedback="The potential returns are very attractive. I think it's worth the risk.",
- ),
- ],
- )
-
- # Evaluate the proposal
- response = service.evaluate_proposal(proposal)
-
- # Print the agent's recommendation
- print("--- Agent's Recommendation ---")
- print(response.answers[0])
-
-
-if __name__ == "__main__":
- run_proposal_example()
diff --git a/examples/talos_tool_example.py b/examples/talos_tool_example.py
deleted file mode 100644
index e59123d3..00000000
--- a/examples/talos_tool_example.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from __future__ import annotations
-
-import datetime
-from typing import TYPE_CHECKING, Any
-
-from langchain_core.tools import tool
-
-from talos.hypervisor.supervisor import Supervisor
-
-if TYPE_CHECKING:
- from talos.core.agent import Agent
-
-
-@tool
-def get_current_time() -> str:
- """
- Returns the current time.
- """
- return datetime.datetime.now().isoformat()
-
-
-class SimpleSupervisor(Supervisor):
- """
- A simple supervisor that approves every other tool call.
- """
-
- counter: int = 0
-
- def set_agent(self, agent: "Agent"):
- """
- Sets the agent to be supervised.
- """
- pass
-
- def approve(self, action: str, args: dict[str, Any]) -> tuple[bool, str | None]:
- """
- Approves or denies an action.
- """
- self.counter += 1
- if self.counter % 2 == 0:
- return True, None
- return False, "Denied by SimpleSupervisor"
diff --git a/examples/tool_example.py b/examples/tool_example.py
deleted file mode 100644
index 4cbe02c6..00000000
--- a/examples/tool_example.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from __future__ import annotations
-
-from talos.tools import SimpleSupervisor, get_current_time
-from talos.tools.supervised_tool import SupervisedTool
-
-
-def main():
- """
- An example of how to use the SupervisedTool.
- """
- # Create a supervisor
- supervisor = SimpleSupervisor()
-
- # Create a tool
- tool = get_current_time
- print(f"Tool: {tool.name}")
-
- # Create a supervised tool
- supervised_tool = SupervisedTool(
- tool=tool,
- supervisor=supervisor,
- name=tool.name,
- description=tool.description,
- args_schema=tool.args_schema,
- messages=[],
- )
-
- # The first call should be approved
- print("First call (should be approved):")
- result = supervised_tool.invoke({})
- print(f"Result: {result}")
-
- # The second call should be denied
- print("\nSecond call (should be denied):")
- result = supervised_tool.invoke({})
- print(f"Result: {result}")
-
- # The third call should be approved
- print("\nThird call (should be approved):")
- result = supervised_tool.invoke({})
- print(f"Result: {result}")
-
-
-if __name__ == "__main__":
- main()
diff --git a/integration_tests/README.md b/integration_tests/README.md
deleted file mode 100644
index ddc79802..00000000
--- a/integration_tests/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Integration Tests
-
-This directory contains comprehensive integration tests for the Talos agent that verify end-to-end functionality.
-
-## Running Integration Tests
-
-### Run all integration tests:
-```bash
-uv run pytest integration_tests/ -v
-```
-
-### Run specific test files:
-```bash
-uv run pytest integration_tests/test_memory_integration.py -v
-uv run pytest integration_tests/test_memory_tool_availability.py -v
-uv run pytest integration_tests/test_cli_memory.py -v
-uv run pytest integration_tests/test_memory_prompt_fix.py -v
-```
-
-## Test Coverage
-
-### Memory Functionality Tests
-- **test_memory_integration.py**: Core memory tool registration and storage/retrieval
-- **test_memory_tool_availability.py**: Memory tool availability and binding verification
-- **test_cli_memory.py**: CLI-based memory functionality testing
-- **test_memory_prompt_fix.py**: Prompt engineering verification for automatic memory usage
-
-## Configuration
-
-These tests are excluded from the default pytest run and GitHub Actions CI to keep the main test suite fast and focused. They can be run manually when needed for comprehensive verification.
-
-## Test Requirements
-
-- OpenAI API key (for LLM model testing)
-- Full Talos environment setup
-- May create temporary files and directories during execution
diff --git a/integration_tests/__init__.py b/integration_tests/__init__.py
deleted file mode 100644
index 1ae940f0..00000000
--- a/integration_tests/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-Integration tests for Talos agent functionality.
-
-These tests are more comprehensive and may require external dependencies.
-They are skipped by default in CI but can be run manually for thorough testing.
-
-To run integration tests:
- uv run pytest integration_tests/ -v
-
-To run specific integration test:
- uv run pytest integration_tests/test_memory_integration.py -v
-"""
diff --git a/integration_tests/test_cli_memory.py b/integration_tests/test_cli_memory.py
deleted file mode 100644
index a2068520..00000000
--- a/integration_tests/test_cli_memory.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/env python3
-"""
-CLI-based memory functionality test script.
-Tests memory functionality through the actual CLI interface.
-"""
-
-import subprocess
-import tempfile
-import json
-import time
-from pathlib import Path
-
-
-def run_cli_command(command, input_text=None, timeout=30):
- """Run a CLI command and return the output."""
- try:
- process = subprocess.Popen(
- command,
- shell=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- text=True,
- cwd="."
- )
-
- stdout, stderr = process.communicate(input=input_text, timeout=timeout)
- return process.returncode, stdout, stderr
- except subprocess.TimeoutExpired:
- process.kill()
- return -1, "", "Command timed out"
-
-
-def test_memory_cli_functionality():
- """Test memory functionality through CLI commands."""
- print("Testing Talos CLI Memory Functionality")
- print("=" * 50)
-
- temp_dir = tempfile.mkdtemp()
- user_id = "cli-test-user"
-
- try:
- print(f"Using temp directory: {temp_dir}")
- print(f"Using user ID: {user_id}")
-
- print("\n1. Testing CLI availability...")
- returncode, stdout, stderr = run_cli_command("uv run talos --help")
- if returncode == 0:
- print("✓ CLI is available and working")
- else:
- print(f"✗ CLI failed: {stderr}")
- assert False, f"CLI failed: {stderr}"
-
- print("\n2. Testing initial memory state...")
- cmd = f"uv run talos memory list --user-id {user_id}"
- returncode, stdout, stderr = run_cli_command(cmd, timeout=30)
- if returncode == 0:
- print("✓ Memory list command works")
- print(f"Initial memories: {stdout.strip()}")
- else:
- print(f"✗ Memory list failed: {stderr}")
-
- print("\n3. Testing interactive conversation...")
- cmd = f"uv run talos main --user-id {user_id} --verbose"
- input_text = "I like pizza\n"
-
- print(f"Sending input: '{input_text.strip()}'")
- returncode, stdout, stderr = run_cli_command(cmd, input_text, timeout=60)
-
- print(f"Return code: {returncode}")
- print(f"Output: {stdout}")
- if stderr:
- print(f"Errors: {stderr}")
-
- print("\n4. Checking if memory was stored...")
- time.sleep(1) # Give time for memory to be saved
-
- cmd = f"uv run talos memory list --user-id {user_id}"
- returncode, stdout, stderr = run_cli_command(cmd)
-
- if returncode == 0:
- print("✓ Memory list after conversation:")
- print(stdout)
-
- if "pizza" in stdout.lower():
- print("✓ Pizza preference was stored in memory!")
- else:
- print("✗ Pizza preference not found in memory")
- else:
- print(f"✗ Failed to list memories: {stderr}")
-
- print("\n5. Testing memory search...")
- cmd = f"uv run talos memory search 'pizza' --user-id {user_id}"
- returncode, stdout, stderr = run_cli_command(cmd)
-
- if returncode == 0:
- print("✓ Memory search works:")
- print(stdout)
- else:
- print(f"✗ Memory search failed: {stderr}")
-
- print("\n6. Checking memory file contents...")
- memory_file = Path("memory/memories.json")
- if memory_file.exists():
- try:
- with open(memory_file, 'r') as f:
- memory_data = json.load(f)
- print(f"✓ Memory file exists with {len(memory_data.get('memories', []))} memories")
-
- for memory in memory_data.get('memories', []):
- if memory.get('user_id') == user_id:
- print(f" - {memory.get('description', 'No description')}")
- except Exception as e:
- print(f"✗ Failed to read memory file: {e}")
- else:
- print("✗ Memory file was not created")
-
- print("\n" + "=" * 50)
- print("CLI Memory Test Completed")
-
- except Exception as e:
- print(f"✗ Test failed with exception: {e}")
- raise
-
- finally:
- import shutil
- shutil.rmtree(temp_dir, ignore_errors=True)
-
-
-def test_tool_invocation_detection():
- """Test if we can detect when memory tools are being invoked."""
- print("\nTesting Tool Invocation Detection")
- print("=" * 40)
-
-
- print("Checking memory tool infrastructure...")
-
- files_to_check = [
- "/home/ubuntu/repos/talos/src/talos/tools/memory_tool.py",
- "/home/ubuntu/repos/talos/src/talos/core/memory.py",
- "/home/ubuntu/repos/talos/src/talos/core/agent.py"
- ]
-
- for file_path in files_to_check:
- if Path(file_path).exists():
- print(f"✓ {file_path} exists")
- else:
- print(f"✗ {file_path} missing")
- assert False, f"{file_path} missing"
-
- print("Tool invocation detection test completed")
-
-
-if __name__ == "__main__":
- print("Talos Memory CLI Test Suite")
- print("=" * 60)
-
- success = test_memory_cli_functionality()
- test_tool_invocation_detection()
-
- if success:
- print("\n✓ CLI memory tests completed successfully")
- else:
- print("\n✗ CLI memory tests failed")
-
- print("\nNext steps:")
- print("1. Run: uv run pytest test_memory_integration.py -v")
- print("2. Run: uv run pytest tests/test_memory_tool.py -v")
- print("3. Check memory tool binding and automatic invocation")
diff --git a/integration_tests/test_memory_integration.py b/integration_tests/test_memory_integration.py
deleted file mode 100644
index b75d20a3..00000000
--- a/integration_tests/test_memory_integration.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python3
-"""
-Comprehensive memory functionality test for Talos agent.
-Tests memory tool availability, binding, and automatic invocation.
-"""
-
-import tempfile
-import shutil
-from pathlib import Path
-from langchain_openai import ChatOpenAI
-
-from src.talos.core.main_agent import MainAgent
-
-
-class TestMemoryIntegration:
- """Test memory tool integration and automatic invocation."""
-
- def setup_method(self):
- """Set up test environment."""
- self.temp_dir = tempfile.mkdtemp()
- self.memory_file = Path(self.temp_dir) / "test_memory.json"
-
- def teardown_method(self):
- """Clean up test environment."""
- shutil.rmtree(self.temp_dir, ignore_errors=True)
-
- def test_main_agent_memory_tool_registration(self):
- """Test that MainAgent properly registers memory tools."""
- model = ChatOpenAI(model="gpt-5", api_key="dummy-key")
-
- agent = MainAgent(
- model=model,
- prompts_dir="/home/ubuntu/repos/talos/src/talos/prompts",
- memory_file=str(self.memory_file)
- )
-
- if hasattr(agent, 'tool_manager') and agent.tool_manager:
- tool_names = list(agent.tool_manager.tools.keys())
- assert "add_memory" in tool_names, f"Memory tool not found in tools: {tool_names}"
- else:
- assert False, "Tool manager not initialized"
-
- def test_memory_storage_and_retrieval(self):
- """Test memory storage and retrieval functionality."""
- model = ChatOpenAI(model="gpt-5", api_key="dummy-key")
-
- agent = MainAgent(
- model=model,
- prompts_dir="/home/ubuntu/repos/talos/src/talos/prompts",
- memory_file=str(self.memory_file)
- )
-
- agent.memory.add_memory("User likes pizza")
-
- memories = agent.memory.search("pizza preferences")
- assert len(memories) > 0, "No memories found for pizza preferences"
- assert "pizza" in memories[0].description.lower(), "Pizza preference not stored correctly"
-
-
-if __name__ == "__main__":
- print("Memory Integration Test Suite")
- print("=" * 40)
-
- print("\nTo run full test suite:")
- print("uv run pytest test_memory_integration.py -v")
diff --git a/integration_tests/test_memory_prompt_fix.py b/integration_tests/test_memory_prompt_fix.py
deleted file mode 100644
index 0341821b..00000000
--- a/integration_tests/test_memory_prompt_fix.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env python3
-"""
-Test script to verify that the prompt fix enables automatic memory tool usage.
-"""
-
-import tempfile
-import shutil
-from pathlib import Path
-
-from src.talos.core.main_agent import MainAgent
-
-
-def test_memory_tool_invocation():
- """Test that the agent now calls memory tools automatically with the updated prompt."""
- print("Testing Memory Tool Invocation with Updated Prompt")
- print("=" * 55)
-
- temp_dir = tempfile.mkdtemp()
- memory_file = Path(temp_dir) / "test_memory.json"
-
- try:
- from langchain_openai import ChatOpenAI
-
- model = ChatOpenAI(model="gpt-5", api_key="dummy-key")
-
- agent = MainAgent(
- model=model,
- prompts_dir="/home/ubuntu/repos/talos/src/talos/prompts",
- memory_file=str(memory_file)
- )
-
- print("✓ MainAgent initialized with updated prompt")
-
- prompt_content = agent.prompt_manager.get_prompt("main_agent_prompt").template
- if "memory" in prompt_content.lower():
- print("✓ Updated prompt mentions memory")
- assert True
- else:
- print("✗ Updated prompt still doesn't mention memory")
- assert False, "Updated prompt still doesn't mention memory"
-
- if "add_memory" in prompt_content.lower():
- print("✓ Updated prompt mentions add_memory tool")
- else:
- print("✗ Updated prompt doesn't mention add_memory tool")
- assert False, "Updated prompt doesn't mention add_memory tool"
-
- print("✓ Memory tool invocation test completed")
-
- except Exception as e:
- print(f"✗ Test failed: {e}")
- raise
-
- finally:
- shutil.rmtree(temp_dir, ignore_errors=True)
-
-
-def test_prompt_content():
- """Test that the prompt now contains memory-related instructions."""
- print("\nTesting Updated Prompt Content")
- print("=" * 35)
-
- try:
- import json
- prompt_file = Path("/home/ubuntu/repos/talos/src/talos/prompts/main_agent_prompt.json")
-
- with open(prompt_file, 'r') as f:
- prompt_data = json.load(f)
-
- template = prompt_data.get('template', '')
-
- memory_checks = [
- ("memory system", "memory system" in template.lower()),
- ("add_memory tool", "add_memory" in template.lower()),
- ("personal information", "personal information" in template.lower()),
- ("preferences", "preferences" in template.lower()),
- ("store information", "store" in template.lower())
- ]
-
- print("Memory-related content checks:")
- for check_name, result in memory_checks:
- status = "✓" if result else "✗"
- print(f"{status} {check_name}: {'Found' if result else 'Not found'}")
-
- if "### Memory and Personalization" in template:
- print("\n✓ Memory and Personalization section found")
-
- lines = template.split('\n')
- in_memory_section = False
- memory_section = []
-
- for line in lines:
- if "### Memory and Personalization" in line:
- in_memory_section = True
- elif line.startswith('###') and in_memory_section:
- break
- elif in_memory_section:
- memory_section.append(line)
-
- memory_text = '\n'.join(memory_section)
- print("Memory section content:")
- print(memory_text[:500] + "..." if len(memory_text) > 500 else memory_text)
- else:
- print("✗ Memory and Personalization section not found")
-
-
- except Exception as e:
- print(f"✗ Prompt content test failed: {e}")
- raise
-
-
-if __name__ == "__main__":
- print("Memory Prompt Fix Verification")
- print("=" * 50)
-
- try:
- test_prompt_content()
- test_memory_tool_invocation()
-
- print("\n" + "=" * 50)
- print("✓ Prompt fix verification completed successfully")
- print("\nNext steps:")
- print("1. Test with real CLI: uv run talos main --user-id test-user")
- print("2. Say 'I like pizza' and verify memory storage")
- print("3. Check memory persistence in follow-up conversations")
-
- except Exception as e:
- print(f"\n✗ Verification tests failed: {e}")
-
- print("\nExpected behavior:")
- print("- Agent should now proactively call add_memory tool")
- print("- Personal information should be stored automatically")
- print("- Agent should reference stored memories in future conversations")
diff --git a/integration_tests/test_memory_tool_availability.py b/integration_tests/test_memory_tool_availability.py
deleted file mode 100644
index dd1beeb3..00000000
--- a/integration_tests/test_memory_tool_availability.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env python3
-"""
-Simple test to verify memory tool availability and binding in MainAgent.
-"""
-
-import tempfile
-import shutil
-from pathlib import Path
-from langchain_openai import ChatOpenAI
-
-from src.talos.core.main_agent import MainAgent
-
-
-def test_memory_tool_availability():
- """Test that memory tools are properly registered and available."""
- print("Testing Memory Tool Availability")
- print("=" * 40)
-
- temp_dir = tempfile.mkdtemp()
- memory_file = Path(temp_dir) / "test_memory.json"
-
- try:
- model = ChatOpenAI(model="gpt-5", api_key="dummy-key")
-
- agent = MainAgent(
- model=model,
- prompts_dir="/home/ubuntu/repos/talos/src/talos/prompts",
- memory_file=str(memory_file)
- )
-
- print("✓ MainAgent initialized successfully")
-
- if hasattr(agent, 'tool_manager') and agent.tool_manager:
- tool_names = list(agent.tool_manager.tools.keys())
- print(f"✓ Tool manager initialized with {len(tool_names)} tools")
- print(f"Available tools: {tool_names}")
-
- if "add_memory" in tool_names:
- print("✓ add_memory tool is registered")
-
- memory_tool = agent.tool_manager.tools["add_memory"]
- print(f"✓ Memory tool type: {type(memory_tool)}")
- print(f"✓ Memory tool description: {memory_tool.description}")
- else:
- print("✗ add_memory tool not found in tool manager")
- assert False, "add_memory tool not found"
- else:
- print("✗ Tool manager not initialized")
- assert False, "Tool manager not initialized"
-
- if hasattr(agent, 'memory') and agent.memory:
- print("✓ Memory system initialized")
-
- agent.memory.add_memory("Test memory: User likes pizza")
- print("✓ Memory addition works")
-
- memories = agent.memory.search("pizza")
- if memories:
- print(f"✓ Memory search works: found {len(memories)} memories")
- print(f" - {memories[0].description}")
- else:
- print("✗ Memory search failed")
- assert False, "Memory search failed"
- else:
- print("✗ Memory system not initialized")
- assert False, "Memory system not initialized"
-
- if hasattr(agent, 'tools'):
- base_tool_names = [tool.name for tool in agent.tools]
- print(f"✓ Base agent tools: {base_tool_names}")
- else:
- print("✗ Base agent tools not available")
-
- except Exception as e:
- print(f"✗ Test failed: {e}")
- raise
-
- finally:
- shutil.rmtree(temp_dir, ignore_errors=True)
-
-
-def test_prompt_analysis():
- """Analyze prompts for memory-related instructions."""
- print("\nAnalyzing Prompts for Memory Instructions")
- print("=" * 45)
-
- prompt_file = Path("/home/ubuntu/repos/talos/src/talos/prompts/main_agent_prompt.json")
-
- try:
- import json
- with open(prompt_file, 'r') as f:
- prompt_data = json.load(f)
-
- template = prompt_data.get('template', '')
-
- memory_keywords = ['memory', 'remember', 'store', 'personal', 'preference', 'tool']
- found_keywords = []
-
- for keyword in memory_keywords:
- if keyword.lower() in template.lower():
- found_keywords.append(keyword)
-
- print(f"✓ Prompt loaded successfully ({len(template)} characters)")
- print(f"Memory-related keywords found: {found_keywords}")
-
- if 'memory' in template.lower():
- print("✓ Prompt mentions 'memory'")
- else:
- print("✗ Prompt does not mention 'memory'")
- assert False, "Prompt does not mention 'memory'"
-
- if 'tool' in template.lower():
- print("✓ Prompt mentions 'tool'")
- else:
- print("✗ Prompt does not mention 'tool'")
-
- if 'user interaction' in template.lower():
- print("✓ Prompt has user interaction section")
- lines = template.split('\n')
- in_user_section = False
- user_section = []
-
- for line in lines:
- if 'user interaction' in line.lower():
- in_user_section = True
- elif line.startswith('##') and in_user_section:
- break
- elif in_user_section:
- user_section.append(line)
-
- user_text = '\n'.join(user_section)
- print("User interaction section:")
- print(user_text[:300] + "..." if len(user_text) > 300 else user_text)
-
- except Exception as e:
- print(f"✗ Prompt analysis failed: {e}")
- raise
-
-
-if __name__ == "__main__":
- print("Memory Tool Availability Test")
- print("=" * 50)
-
- success1 = test_memory_tool_availability()
- success2 = test_prompt_analysis()
-
- print("\n" + "=" * 50)
- if success1 and success2:
- print("✓ All tests completed successfully")
- print("\nFindings:")
- print("- Memory tools are properly registered in MainAgent")
- print("- Memory system works for direct storage and retrieval")
- print("- Prompts may need explicit memory tool usage instructions")
- else:
- print("✗ Some tests failed")
-
- print("\nRecommendations:")
- print("1. Add explicit memory tool usage instructions to agent prompts")
- print("2. Test with real LLM to see if tools are called automatically")
- print("3. Consider prompt engineering to encourage proactive memory usage")
diff --git a/kernel_agi_cli/README.md b/kernel_agi_cli/README.md
new file mode 100644
index 00000000..4cb02a92
--- /dev/null
+++ b/kernel_agi_cli/README.md
@@ -0,0 +1,98 @@
+# Kernel AGI - Chat de Console Interativo
+
+
+
+
+
+Esta é uma ferramenta de linha de comando (CLI) para interagir com a API da OpenRouter. Ela oferece uma experiência de chat rica em recursos, diretamente do seu terminal, incluindo:
+
+- **Streaming de Respostas:** Veja a resposta do modelo sendo "digitada" em tempo real.
+- **Histórico de Conversa:** Salve e carregue sessões de chat para continuar de onde parou.
+- **Comandos Especiais:** Mude de modelo, limpe o histórico, veja informações da sessão e muito mais, sem sair da interface.
+- **Métricas de Performance:** Monitore o uso de tokens, a latência e o custo estimado de cada chamada.
+- **Interface Colorida:** Saída formatada e fácil de ler.
+
+## Instalação
+
+### Pré-requisitos
+- Python 3.8 ou superior
+- Uma chave de API da [OpenRouter](https://openrouter.ai/keys)
+
+### Passos
+
+1. **Clone o repositório (ou tenha acesso a esta pasta):**
+ ```bash
+ # Exemplo, se o projeto estivesse em um repositório git
+ # git clone https://github.com/your-username/kernel-agi-cli.git
+ # cd kernel-agi-cli
+ ```
+
+2. **Instale o pacote em modo editável:**
+ O modo editável (`-e`) é recomendado, pois permite que você modifique o código-fonte e veja as alterações imediatamente, sem precisar reinstalar.
+ ```bash
+ pip install -e .
+ ```
+ Isso instalará as dependências (`aiohttp`, `colorama`) e criará o comando `kernel-chat` no seu ambiente.
+
+## Configuração
+
+A ferramenta requer que sua chave de API da OpenRouter esteja configurada como uma variável de ambiente.
+
+### Linux / macOS
+```bash
+export OPENROUTER_API_KEY="sua-chave-aqui"
+```
+
+### Windows (PowerShell)
+```powershell
+$env:OPENROUTER_API_KEY="sua-chave-aqui"
+```
+Você pode adicionar esta linha ao seu arquivo de perfil do shell (como `.bashrc`, `.zshrc` ou `profile.ps1`) para que a variável seja configurada permanentemente.
+
+## Uso
+
+Após a instalação, você pode iniciar o chat simplesmente executando:
+```bash
+kernel-chat
+```
+
+### Argumentos de Linha de Comando
+
+Você pode iniciar o chat com configurações específicas:
+
+- **Especificar um modelo:**
+ ```bash
+ kernel-chat --model "anthropic/claude-3.5-sonnet"
+ ```
+
+- **Carregar uma conversa salva:**
+ Os arquivos de conversa são salvos em `~/.openrouter_chat/`.
+ ```bash
+ kernel-chat --load "chat_20240728_103000.json"
+ ```
+
+### Comandos Internos
+
+Dentro do chat, você pode usar os seguintes comandos:
+
+- `/help`: Mostra a lista de todos os comandos disponíveis.
+- `/model`: Permite que você troque o modelo de IA durante a sessão.
+- `/save [nome_opcional]`: Salva a conversa atual. Se nenhum nome for fornecido, um nome com timestamp será gerado.
+- `/load <arquivo>`: Carrega uma sessão de chat anterior.
+- `/list`: Lista as últimas 10 conversas salvas.
+- `/history`: Mostra o histórico completo da sessão atual.
+- `/info`: Exibe informações sobre a sessão, como o modelo atual e a contagem de tokens.
+- `/clear`: Limpa o histórico da conversa atual, mantendo o prompt do sistema.
+- `/exit` ou `/quit`: Encerra a sessão de chat.
+
+### Comandos de Governança ($ALE-V)
+
+Esta ferramenta inclui um módulo para simular o poder de voto reputacional, combinando stake financeiro com Prova de Autoridade (PoA) baseada em métricas acadêmicas.
+
+- **/vpa `<H-Index>` `<Citações>`**: Calcula o seu multiplicador de voto (PoA) com base no seu H-Index e número de citações.
+- **/vpa_decay `<H-Index>` `<Citações>` `<Decaimento>`**: Calcula o multiplicador PoA aplicando um fator de decaimento (de 0.0 a 1.0). Isso é útil para cenários onde a verificação das credenciais é fraca ou auto-relatada.
+- **/sim_voto `<$ALE>` `<H-Index>` `<Citações>` `[Decaimento]`**: Simula o seu poder de voto final ($ALE-V), combinando seu stake de `$ALE` com seu multiplicador PoA. O fator de decaimento é opcional.
+
+## Contribuição
+
+Sinta-se à vontade para abrir issues ou pull requests para melhorar esta ferramenta.
\ No newline at end of file
diff --git a/kernel_agi_cli/aletheia-icon.svg b/kernel_agi_cli/aletheia-icon.svg
new file mode 100644
index 00000000..74a7dfc1
--- /dev/null
+++ b/kernel_agi_cli/aletheia-icon.svg
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/kernel_agi_cli/kernel_agi/main.py b/kernel_agi_cli/kernel_agi/main.py
new file mode 100644
index 00000000..92760e2d
--- /dev/null
+++ b/kernel_agi_cli/kernel_agi/main.py
@@ -0,0 +1,614 @@
+#!/usr/bin/env python3
+# ----------------------------------------------------------------------
+# Interactive Console Chat Interface (OpenRouter API) - Otimizado e Final
+#
+# Adiciona: Módulo de Degradação da Autoridade (PoAD) e novo comando /vpa_decay
+# ----------------------------------------------------------------------
+import aiohttp
+import asyncio
+import json
+import sys
+import os
+import time
+import math
+from datetime import datetime
+from pathlib import Path
+from typing import List, Dict, Optional, Tuple
+
+# Tenta importar colorama para cores no console
+try:
+ from colorama import init, Fore, Style
+ init(autoreset=True)
+ HAS_COLOR = True
+except ImportError:
+ HAS_COLOR = False
+ # Fallback sem cores
+ class Fore:
+ CYAN = GREEN = YELLOW = RED = MAGENTA = BLUE = ""
+ class Style:
+ BRIGHT = RESET_ALL = ""
+
+# ======================================================================
+# 🔧 CONFIGURAÇÃO E DADOS DE CUSTO (Estimativas Baseadas em Preços Públicos)
+# ======================================================================
+
+API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
+API_URL = "https://openrouter.ai/api/v1/chat/completions"
+
+# Headers obrigatórios para autenticação
+HEADERS = {
+ "Authorization": f"Bearer {API_KEY}",
+ "Content-Type": "application/json",
+ "HTTP-Referer": "https://github.com/your-username/openrouter-chat",
+ "X-Title": "Console Chat App - Kernel AGI"
+}
+
+# Configurações padrão
+DEFAULT_MODEL = "openai/gpt-4o-mini"
+TYPING_DELAY = 0.025
+MAX_HISTORY_MESSAGES = 20
+SAVE_DIR = Path.home() / ".openrouter_chat"
+
+# Modelos disponíveis (exemplos populares)
+AVAILABLE_MODELS = {
+ "1": ("openai/gpt-4o-mini", "GPT-4 Omni Mini - Rápido e eficiente"),
+ "2": ("openai/gpt-4o", "GPT-4 Omni - Mais poderoso"),
+ "3": ("anthropic/claude-3.5-sonnet", "Claude 3.5 Sonnet - Excelente raciocínio"),
+ "4": ("anthropic/claude-3-haiku", "Claude 3 Haiku - Rápido e econômico"),
+ "5": ("google/gemini-pro-1.5", "Gemini Pro 1.5 - Google AI"),
+ "6": ("meta-llama/llama-3.1-70b-instruct", "Llama 3.1 70B - Open source"),
+}
+
+# Estado global
+conversation_history: List[Dict[str, str]] = []
+current_model = DEFAULT_MODEL
+is_streaming = False
+
+# ======================================================================
+# 🎯 FUNÇÕES CHAVE: AUTORIDADE (PoA) E VOTO ($ALE-V)
+# ======================================================================
+
+# CUSTO POR 1M DE TOKENS (USD - Valores de Referência OpenRouter/API)
+MODEL_PRICING = {
+ "openai/gpt-4o-mini": {"input": 0.15, "output": 0.60},
+ "openai/gpt-4o": {"input": 5.00, "output": 15.00},
+ "anthropic/claude-3.5-sonnet": {"input": 3.00, "output": 15.00},
+ "anthropic/claude-3-haiku": {"input": 0.25, "output": 1.25},
+ "google/gemini-pro-1.5": {"input": 3.50, "output": 7.00},
+ "meta-llama/llama-3.1-70b-instruct": {"input": 0.50, "output": 0.70},
+}
+
+def get_reputational_multiplier(h_index: int, citations: int, verification_decay: float = 1.0) -> float:
+ """
+ Calcula o multiplicador de voto baseado em métricas acadêmicas (PoA),
+ aplicando um fator de decaimento se a prova for fraca (verification_decay < 1.0).
+ verification_decay: 1.0 = Prova 100% verificável; 0.5 = Prova falha.
+ """
+ if citations < 0 or h_index < 0:
+ return 1.0
+
+ # 1. Componente PoA Base (sempre >= 1.0)
+ volume_factor = math.log10(citations + 1)
+ quality_factor = h_index / 10.0
+
+ multiplier_base = 1.0 + (volume_factor * quality_factor)
+
+ # 2. Aplicação do Decaimento
+ # O decaimento afeta apenas a parte do multiplicador que excede 1.0 (o voto básico)
+ reputational_gain = multiplier_base - 1.0
+ multiplier = 1.0 + (reputational_gain * verification_decay)
+
+ # Cap em 5x
+ return min(multiplier, 5.0)
+
+def calculate_ale_v_power(staked_ale: float, h_index: int, citations: int, verification_decay: float = 1.0) -> Tuple[float, float]:
+ """
+ Calcula o poder de voto ($ALE-V) combinando stake financeiro e autoridade PoA.
+ Retorna (poder_de_voto, multiplicador_aplicado).
+ """
+ multiplier = get_reputational_multiplier(h_index, citations, verification_decay)
+ vote_power = staked_ale * multiplier
+
+ return vote_power, multiplier
+
+# ======================================================================
+# 🎨 FUNÇÕES DE UTILIDADE E CÁLCULO
+# ======================================================================
+
+def print_colored(text: str, color: str = Fore.CYAN, bright: bool = False, end: str = "\n"):
+ """Imprime texto colorido se colorama estiver disponível."""
+ style = Style.BRIGHT if bright else ""
+ print(f"{style}{color}{text}{Style.RESET_ALL}", end=end)
+
+def print_separator(char: str = "─", length: int = 70):
+ """Imprime uma linha separadora."""
+ print_colored(char * length, Fore.CYAN)
+
+def calculate_estimated_cost(model_id: str, prompt_tokens: int, completion_tokens: int) -> float:
+ """Calcula o custo estimado em USD com base na tabela de preços."""
+ prices = MODEL_PRICING.get(model_id)
+ if not prices:
+ return 0.0
+
+ cost_prompt = (prompt_tokens / 1_000_000) * prices["input"]
+ cost_completion = (completion_tokens / 1_000_000) * prices["output"]
+
+ return cost_prompt + cost_completion
+
+def count_tokens_estimate(text: str) -> int:
+ """Estimativa simples de tokens (aproximadamente 4 chars = 1 token)."""
+ return len(text) // 4
+
+def truncate_history():
+ """Mantém o histórico dentro do limite definido."""
+ global conversation_history
+
+ if len(conversation_history) > MAX_HISTORY_MESSAGES + 1:
+ system_msg = conversation_history[0]
+ conversation_history = [system_msg] + conversation_history[-(MAX_HISTORY_MESSAGES):]
+ print_colored("⚠️ Histórico truncado para manter limite de mensagens.", Fore.YELLOW)
+
+def ensure_save_dir():
+ """Garante que o diretório de salvamento existe."""
+ SAVE_DIR.mkdir(parents=True, exist_ok=True)
+
+# ======================================================================
+# 💾 FUNÇÕES DE SALVAMENTO/CARREGAMENTO (Mantidas)
+# ======================================================================
+
+def save_conversation(filename: Optional[str] = None):
+ """Salva a conversa atual em um arquivo JSON."""
+ ensure_save_dir()
+
+ if not filename:
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ filename = f"chat_{timestamp}.json"
+
+ filepath = SAVE_DIR / filename
+
+ try:
+ with open(filepath, 'w', encoding='utf-8') as f:
+ json.dump({
+ "model": current_model,
+ "timestamp": datetime.now().isoformat(),
+ "messages": conversation_history
+ }, f, indent=2, ensure_ascii=False)
+
+ print_colored(f"✅ Conversa salva em: {filepath}", Fore.GREEN)
+ except Exception as e:
+ print_colored(f"❌ Erro ao salvar: {e}", Fore.RED)
+
+def load_conversation(filename: str):
+ """Carrega uma conversa de um arquivo JSON."""
+ global conversation_history, current_model
+
+ filepath = SAVE_DIR / filename if not filename.startswith('/') else Path(filename)
+
+ try:
+ with open(filepath, 'r', encoding='utf-8') as f:
+ data = json.load(f)
+
+ conversation_history = data.get("messages", [])
+ current_model = data.get("model", DEFAULT_MODEL)
+
+ print_colored(f"✅ Conversa carregada: {len(conversation_history)} mensagens", Fore.GREEN)
+ print_colored(f"📋 Modelo: {current_model}", Fore.CYAN)
+ show_history()
+ except FileNotFoundError:
+ print_colored(f"❌ Arquivo não encontrado: {filepath}", Fore.RED)
+ except Exception as e:
+ print_colored(f"❌ Erro ao carregar: {e}", Fore.RED)
+
+def list_saved_conversations():
+ """Lista todas as conversas salvas."""
+ ensure_save_dir()
+
+ files = sorted(SAVE_DIR.glob("chat_*.json"), reverse=True)
+
+ if not files:
+ print_colored("📭 Nenhuma conversa salva encontrada.", Fore.YELLOW)
+ return
+
+ print_colored("\n📚 Conversas salvas:", Fore.CYAN, bright=True)
+ for i, f in enumerate(files[:10], 1): # Mostra últimas 10
+ stat = f.stat()
+ size = stat.st_size / 1024 # KB
+ mtime = datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M")
+ print(f" {i}. {Fore.YELLOW}{f.name}{Style.RESET_ALL} ({size:.1f} KB) - {mtime}")
+
+# ======================================================================
+# 🤖 FUNÇÃO DE STREAMING COM MÉTRICAS
+# ======================================================================
+
+async def chat_stream(
+ model: str = DEFAULT_MODEL,
+ temperature: float = 0.7,
+ max_tokens: int = 1024
+) -> Tuple[str, Optional[Dict], float]:
+ """
+ Envia requisição de chat com streaming, exibe resposta e mede performance.
+ Retorna (full_response, usage_metadata, latency).
+ """
+ global is_streaming
+
+ payload = {
+ "model": model,
+ "messages": conversation_history,
+ "stream": True,
+ "temperature": temperature,
+ "max_tokens": max_tokens
+ }
+
+ full_response = ""
+ usage_metadata: Optional[Dict] = None
+ is_streaming = True
+ start_time = time.time() # Inicia a medição de latência
+
+ print_colored("\n🤖 Assistente: ", Fore.MAGENTA, bright=True, end="")
+
+ try:
+ timeout = aiohttp.ClientTimeout(total=120)
+ async with aiohttp.ClientSession(headers=HEADERS, timeout=timeout) as session:
+ async with session.post(API_URL, json=payload) as resp:
+
+ if resp.status != 200:
+ error_text = await resp.text()
+ print_colored(f"\n\n❌ Erro HTTP {resp.status}", Fore.RED)
+ print_colored(f"Detalhes: {error_text}", Fore.RED)
+ return "", None, 0.0
+
+ async for raw_line in resp.content:
+ line = raw_line.decode("utf-8", errors='ignore').strip()
+
+ if not line or not line.startswith("data:"):
+ continue
+
+ data_str = line[5:].strip()
+
+ if data_str == "[DONE]":
+ break
+
+ try:
+ data_json = json.loads(data_str)
+
+ if 'usage' in data_json:
+ usage_metadata = data_json['usage']
+
+ delta = data_json.get("choices", [{}])[0].get("delta", {}).get("content", "")
+
+ if delta:
+ delay = TYPING_DELAY if len(delta) < 50 else TYPING_DELAY / 3
+
+ for char in delta:
+ print(char, end="", flush=True)
+ full_response += char
+ await asyncio.sleep(delay)
+
+ except json.JSONDecodeError:
+ continue
+
+ except asyncio.TimeoutError:
+ print_colored("\n\n⏱️ Timeout: Requisição demorou muito.", Fore.RED)
+ except Exception as e:
+ print_colored(f"\n\n❌ Erro de conexão/stream: {e}", Fore.RED)
+ finally:
+ is_streaming = False
+ latency = time.time() - start_time
+
+ print() # Nova linha após resposta
+ return full_response, usage_metadata, latency
+
+# ======================================================================
+# 📋 COMANDOS ESPECIAIS
+# ======================================================================
+
+def show_help():
+ """Exibe menu de ajuda com comandos disponíveis."""
+ print_separator()
+ print_colored("📖 COMANDOS DISPONÍVEIS", Fore.CYAN, bright=True)
+ print_separator()
+ commands = [
+ ("/help", "Mostra esta mensagem de ajuda"),
+ ("/clear", "Limpa o histórico de conversa (mantém system prompt)"),
+ ("/history", "Mostra o histórico completo"),
+ ("/save [nome]", "Salva a conversa atual"),
+ ("/load ", "Carrega uma conversa salva"),
+ ("/list", "Lista conversas salvas"),
+ ("/model", "Troca o modelo de IA"),
+ ("/info", "Mostra informações da sessão"),
+ ("/vpa ", "Calcula Multiplicador de Voto (PoA)"),
+ ("/vpa_decay ", "Calcula PoA com Decaimento (0.0 a 1.0)"), # NOVO COMANDO
+ ("/sim_voto <$ALE> ", "Simula Poder de Voto Reputacional ($ALE-V)"),
+ ("/exit, /quit", "Encerra o chat"),
+ ]
+ for cmd, desc in commands:
+ print(f" {Fore.GREEN}{cmd:35s}{Style.RESET_ALL} - {desc}")
+ print_separator()
+
+def show_info():
+ """Mostra informações da sessão atual."""
+ print_separator()
+ print_colored("ℹ️ INFORMAÇÕES DA SESSÃO", Fore.CYAN, bright=True)
+ print_separator()
+ print(f" Modelo atual: {Fore.GREEN}{current_model}{Style.RESET_ALL}")
+ print(f" Mensagens no histórico: {Fore.YELLOW}{len(conversation_history)}{Style.RESET_ALL}")
+ print(f" Limite de Contexto: {Fore.YELLOW}{MAX_HISTORY_MESSAGES} mensagens (excluindo system){Style.RESET_ALL}")
+
+ total_tokens = sum(count_tokens_estimate(msg['content']) for msg in conversation_history)
+ print(f" Tokens estimados total: {Fore.YELLOW}~{total_tokens}{Style.RESET_ALL}")
+ print_separator()
+
+def change_model():
+ """Interface para trocar o modelo."""
+ global current_model
+
+ print_separator()
+ print_colored("🔄 SELECIONE UM MODELO", Fore.CYAN, bright=True)
+ print_separator()
+
+ for key, (model_id, description) in AVAILABLE_MODELS.items():
+ current = " (atual)" if model_id == current_model else ""
+ cost_info = f" (Input: ${MODEL_PRICING.get(model_id, {}).get('input', 'N/A'):.2f}/M)" if model_id in MODEL_PRICING else ""
+
+ print(f" {Fore.GREEN}{key}{Style.RESET_ALL}. {description}{Fore.YELLOW}{current}{cost_info}{Style.RESET_ALL}")
+
+ print_separator()
+ choice = input(f"{Fore.CYAN}Escolha (1-{len(AVAILABLE_MODELS)}): {Style.RESET_ALL}").strip()
+
+ if choice in AVAILABLE_MODELS:
+ current_model = AVAILABLE_MODELS[choice][0]
+ print_colored(f"✅ Modelo alterado para: {current_model}", Fore.GREEN)
+ else:
+ print_colored("❌ Opção inválida.", Fore.RED)
+
+def show_history():
+ """Exibe o histórico de conversa formatado."""
+ print_separator()
+ print_colored("📜 HISTÓRICO DA CONVERSA", Fore.CYAN, bright=True)
+ print_separator()
+
+ for i, msg in enumerate(conversation_history, 1):
+ role = msg['role']
+ content = msg['content']
+
+ if role == "system":
+ color = Fore.BLUE
+ icon = "⚙️"
+ elif role == "user":
+ color = Fore.GREEN
+ icon = "👤"
+ else:
+ color = Fore.MAGENTA
+ icon = "🤖"
+
+ print(f"\n{color}{Style.BRIGHT}{icon} {role.upper()}{Style.RESET_ALL}")
+ # Trunca mensagens muito longas
+ display_content = content if len(content) < 500 else content[:500] + "..."
+ print(f"{display_content}")
+
+ print_separator()
+
+# ======================================================================
+# 🎮 LOOP PRINCIPAL
+# ======================================================================
+
+async def main_loop():
+ """Loop principal do chat interativo."""
+ global conversation_history, current_model
+
+ # --- 1. Inicialização ---
+ if not API_KEY:
+ print_colored("❌ ERRO: A chave de API OPENROUTER_API_KEY não está configurada.", Fore.RED, bright=True)
+ print_colored("Por favor, configure-a como uma variável de ambiente.", Fore.YELLOW)
+ sys.exit(1)
+
+ # Inicializa com system prompt padrão se vazio (apenas na primeira rodada)
+ if not conversation_history:
+ conversation_history = [{
+ "role": "system",
+ "content": "Você é um assistente de IA prestativo, inteligente e conciso, alinhado à busca pela verdade (Aletheia)."
+ }]
+
+ print_separator("═", 70)
+ print_colored("🚀 CHAT INTERATIVO KERNEL AGI - Aletheia Terminal", Fore.CYAN, bright=True)
+ print_separator("═", 70)
+ print_colored(f"Modelo: {current_model}", Fore.YELLOW)
+ print_colored("Digite /help para ver comandos disponíveis. Use /exit para sair.", Fore.BLUE)
+ print_separator("═", 70)
+
+ # Imprime o system prompt de início
+ if conversation_history and conversation_history[0].get("role") == "system":
+ print_colored(f"⚙️ MODO: {conversation_history[0]['content']}", Fore.BLUE)
+ print_separator("─", 70)
+
+ # --- 2. Loop de Interação ---
+ while True:
+ try:
+ # Leitura de input do usuário
+ user_input = await asyncio.get_event_loop().run_in_executor(
+ None,
+ lambda: input(f"\n{Fore.GREEN}{Style.BRIGHT}👤 Você:{Style.RESET_ALL} ")
+ )
+
+ user_input = user_input.strip()
+
+ if not user_input:
+ continue
+
+ # Processa comandos
+ if user_input.startswith('/'):
+ cmd_parts = user_input.split(maxsplit=4)
+ cmd = cmd_parts[0].lower()
+ arg1 = cmd_parts[1] if len(cmd_parts) > 1 else None
+ arg2 = cmd_parts[2] if len(cmd_parts) > 2 else None
+ arg3 = cmd_parts[3] if len(cmd_parts) > 3 else None
+ arg4 = cmd_parts[4] if len(cmd_parts) > 4 else None
+
+
+ if cmd in ['/exit', '/quit']:
+ print_colored("\n👋 Até logo! Busque sempre a verdade (Aletheia).", Fore.CYAN)
+ break
+
+ elif cmd == '/clear':
+ system_msg = conversation_history[0] if conversation_history else None
+ conversation_history = [system_msg] if system_msg else []
+ print_colored("🗑️ Histórico limpo.", Fore.YELLOW)
+ continue
+
+ elif cmd == '/history':
+ show_history()
+ continue
+
+ elif cmd == '/save':
+ save_conversation(arg1)
+ continue
+
+ elif cmd == '/load':
+ if arg1: load_conversation(arg1)
+ else: print_colored("❌ Uso: /load ", Fore.RED)
+ continue
+
+ elif cmd == '/list':
+ list_saved_conversations()
+ continue
+
+ elif cmd == '/model':
+ change_model()
+ continue
+
+ elif cmd == '/info':
+ show_info()
+ continue
+
+ elif cmd == '/vpa': # COMANDO VPA - Multiplicador de Autoridade
+ try:
+ h_index = int(arg1)
+ citations = int(arg2)
+ multiplier = get_reputational_multiplier(h_index, citations)
+
+ print_separator("┄", 70)
+ print_colored("🔬 CÁLCULO DE PROVA DE AUTORIDADE (PoA)", Fore.BLUE, bright=True)
+ print(f"H-Index Verificado: {Fore.YELLOW}{h_index}{Style.RESET_ALL}")
+ print(f"Citações Totais: {Fore.YELLOW}{citations}{Style.RESET_ALL}")
+ print_colored(f"Multiplicador de Voto (PoA): {Fore.GREEN}{multiplier:.2f}x{Style.RESET_ALL}", Fore.GREEN, bright=True)
+ print_separator("┄", 70)
+ except (ValueError, TypeError):
+ print_colored("❌ Uso: /vpa (ex: /vpa 15 500)", Fore.RED)
+ continue
+
+ elif cmd == '/vpa_decay': # COMANDO VPA_DECAY - Decaimento da Autoridade
+ try:
+ h_index = int(arg1)
+ citations = int(arg2)
+ decay = float(arg3)
+
+ if not (0.0 <= decay <= 1.0):
+ raise ValueError("Decaimento deve estar entre 0.0 e 1.0.")
+
+ multiplier = get_reputational_multiplier(h_index, citations, verification_decay=decay)
+
+ print_separator("┄", 70)
+ print_colored("📉 CÁLCULO DE PoA COM DECAIMENTO", Fore.YELLOW, bright=True)
+ print(f"H-Index / Citações: {Fore.YELLOW}{h_index} / {citations}{Style.RESET_ALL}")
+ print(f"Fator de Decaimento (Prova Fraca): {Fore.RED}{decay:.2f}{Style.RESET_ALL} (0.0 = Nulo, 1.0 = Máximo)")
+ print_colored(f"Multiplicador Efetivo (PoA): {Fore.GREEN}{multiplier:.2f}x{Style.RESET_ALL}", Fore.GREEN, bright=True)
+ print_separator("┄", 70)
+ except (ValueError, TypeError):
+ print_colored("❌ Uso: /vpa_decay ", Fore.RED)
+ print_colored(" Ex: /vpa_decay 15 500 0.5", Fore.YELLOW)
+ continue
+
+
+ elif cmd == '/sim_voto': # NOVO COMANDO - Simulação $ALE-V
+ try:
+ staked_ale = float(arg1)
+ h_index = int(arg2)
+ citations = int(arg3)
+ decay = float(arg4) if arg4 else 1.0 # Opcional: Decaimento
+
+ if not (0.0 <= decay <= 1.0):
+ raise ValueError("Decaimento deve estar entre 0.0 e 1.0.")
+
+ vote_power, multiplier = calculate_ale_v_power(staked_ale, h_index, citations, verification_decay=decay)
+
+ print_separator("┄", 70)
+ print_colored("🌊 SIMULAÇÃO: PODER DE VOTO REPUTACIONAL ($ALE-V)", Fore.MAGENTA, bright=True)
+ print(f"Stake Financeiro ($ALE): {Fore.YELLOW}{staked_ale:,.2f}{Style.RESET_ALL}")
+ print(f"Multiplicador PoA (H/C): {Fore.YELLOW}{multiplier:.2f}x{Style.RESET_ALL}")
+ print(f"Decaimento Aplicado: {Fore.RED}{decay:.2f}x{Style.RESET_ALL}")
+ print_separator("─", 70)
+ print_colored(f"Total $ALE-V (Poder de Voto): {Fore.GREEN}{vote_power:,.2f}{Style.RESET_ALL}", Fore.GREEN, bright=True)
+ print_separator("┄", 70)
+ except (ValueError, TypeError):
+ print_colored("❌ Uso: /sim_voto <$ALE Stake> [Decaimento]", Fore.RED)
+ print_colored(" Ex: /sim_voto 1000 20 1200 0.8 (Decaimento é opcional, default 1.0)", Fore.YELLOW)
+ continue
+
+ elif cmd == '/help':
+ show_help()
+ continue
+
+ else:
+ print_colored(f"❌ Comando desconhecido: {cmd}", Fore.RED)
+ print_colored("Digite /help para ver comandos disponíveis", Fore.YELLOW)
+ continue
+
+ # Adiciona mensagem do usuário ao histórico
+ conversation_history.append({"role": "user", "content": user_input})
+
+ # Chama a API com streaming e captura métricas
+ response, metadata, latency = await chat_stream(model=current_model)
+
+ # Adiciona resposta ao histórico
+ if response.strip():
+ conversation_history.append({"role": "assistant", "content": response})
+ truncate_history()
+
+ # --- RELATÓRIO PÓS-RESPOSTA ---
+ if metadata and 'usage' in metadata:
+ usage = metadata['usage']
+ prompt_tokens = usage.get('prompt_tokens', 0)
+ completion_tokens = usage.get('completion_tokens', 0)
+
+ estimated_cost = calculate_estimated_cost(current_model, prompt_tokens, completion_tokens)
+
+ print_separator("┄", 70)
+ print_colored(f"📊 Prompt: {prompt_tokens} tokens | Completion: {completion_tokens} tokens", Fore.YELLOW)
+ print_colored(f"⏱️ Latência Total: {latency:.2f} s", Fore.YELLOW)
+ print_colored(f"💰 Custo Estimado: ${estimated_cost:.6f} USD", Fore.YELLOW, bright=True)
+ print_separator("┄", 70)
+ # -----------------------------
+
+ else:
+ # Remove a mensagem do usuário se a resposta falhou (para não poluir o histórico)
+ conversation_history.pop()
+
+ except KeyboardInterrupt:
+ print_colored("\n\n⚠️ Sessão interrompida. Use /exit para sair.", Fore.YELLOW)
+ continue
+
+ except Exception as e:
+ if not is_streaming:
+ print_colored(f"\n❌ Erro fatal no loop: {e}", Fore.RED)
+ break
+
+# ======================================================================
+# 🚀 EXECUÇÃO
+# ======================================================================
+def entry_point():
+ if not os.environ.get("OPENROUTER_API_KEY"):
+ print_colored("❌ ERRO: Configure a variável de ambiente OPENROUTER_API_KEY", Fore.RED, bright=True)
+ print_colored("Obtenha sua chave em: https://openrouter.ai/keys", Fore.CYAN)
+ sys.exit(1)
+
+ try:
+ # Configura loop de eventos (necessário para rodar input() dentro de asyncio)
+ asyncio.run(main_loop())
+ except KeyboardInterrupt:
+ sys.exit(0)
+ except Exception as e:
+ print_colored(f"\n❌ Erro crítico no início do programa: {e}", Fore.RED)
+ sys.exit(1)
+
+if __name__ == "__main__":
+ entry_point()
\ No newline at end of file
diff --git a/kernel_agi_cli/pyproject.toml b/kernel_agi_cli/pyproject.toml
new file mode 100644
index 00000000..d95dc3d8
--- /dev/null
+++ b/kernel_agi_cli/pyproject.toml
@@ -0,0 +1,25 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "kernel-agi-cli"
+version = "1.0.0"
+authors = [
+ { name="Your Name", email="you@example.com" },
+]
+description = "Um cliente de chat de console interativo para a API OpenRouter."
+readme = "README.md"
+requires-python = ">=3.8"
+classifiers = [
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+]
+dependencies = [
+ "aiohttp>=3.9.5",
+ "colorama>=0.4.6",
+]
+
+[project.scripts]
+kernel-chat = "kernel_agi.main:entry_point"
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100644
index fd3f37fa..00000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1,100 +0,0 @@
-site_name: Talos Documentation
-site_description: AI Protocol Owner - Autonomous Treasury Management and Governance
-site_url: https://talos-agent.github.io/talos/
-repo_url: https://github.com/talos-agent/talos
-repo_name: talos-agent/talos
-edit_uri: edit/main/docs/
-
-theme:
- name: material
- features:
- - navigation.tabs
- - navigation.sections
- - navigation.expand
- - navigation.path
- - navigation.top
- - search.highlight
- - search.share
- - content.code.copy
- - content.code.select
- - content.tabs.link
- palette:
- - scheme: default
- primary: deep purple
- accent: purple
- toggle:
- icon: material/brightness-7
- name: Switch to dark mode
- - scheme: slate
- primary: deep purple
- accent: purple
- toggle:
- icon: material/brightness-4
- name: Switch to light mode
- font:
- text: Roboto
- code: Roboto Mono
-
-plugins:
- - search
- - mkdocstrings:
- handlers:
- python:
- options:
- docstring_style: google
-
-markdown_extensions:
- - admonition
- - pymdownx.details
- - pymdownx.superfences
- - pymdownx.highlight:
- anchor_linenums: true
- - pymdownx.inlinehilite
- - pymdownx.snippets
- - pymdownx.tabbed:
- alternate_style: true
- - pymdownx.tasklist:
- custom_checkbox: true
- - attr_list
- - md_in_html
- - toc:
- permalink: true
-
-nav:
- - Home: index.md
- - Getting Started:
- - Overview: getting-started/overview.md
- - Installation: getting-started/installation.md
- - Quick Start: getting-started/quickstart.md
- - Architecture:
- - Core Components: architecture/components.md
- - Agent System: architecture/agents.md
- - Hypervisor: architecture/hypervisor.md
- - Skills & Services: architecture/skills-services.md
- - CLI Reference:
- - Overview: cli/overview.md
- - Interactive Mode: cli/interactive.md
- - GitHub Commands: cli/github.md
- - Twitter Commands: cli/twitter.md
- - Cryptography: cli/crypto.md
- - Development:
- - Contributing: development/contributing.md
- - Code Style: development/code-style.md
- - Performance: development/performance.md
- - Testing: development/testing.md
- - Philosophy:
- - Vision: philosophy/vision.md
- - Roadmap: philosophy/roadmap.md
- - API Reference:
- - Core: api/core.md
- - Services: api/services.md
- - Tools: api/tools.md
-
-extra:
- social:
- - icon: fontawesome/brands/github
- link: https://github.com/talos-agent/talos
- - icon: fontawesome/brands/twitter
- link: https://twitter.com/talos_protocol
-
-copyright: Copyright © 2024 Talos Protocol
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index 4d22686c..00000000
--- a/mypy.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[mypy]
-strict = true
-ignore_missing_imports = True
-
-[mypy-requests]
-ignore_missing_imports = True
diff --git a/proposal_example.txt b/proposal_example.txt
deleted file mode 100644
index de46a371..00000000
--- a/proposal_example.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-[PROPOSAL]
-This is a test proposal. It is a good proposal.
-
-[FEEDBACK]
-vitalik: I like it
-hayden: I also like it
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index 8f1b921e..00000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,74 +0,0 @@
-[project]
-name = "talos"
-version = "0.1.0"
-description = "An AI agent for managing a cryptocurrency treasury."
-authors = [{ name = "Jules", email = "agent@example.com" }]
-readme = "README.md"
-requires-python = ">=3.12"
-dependencies = [
- "PyGithub==2.6.1",
- "textblob==0.19.0",
- "beautifulsoup4==4.13.4",
- "requests==2.32.4",
- "pypdf==5.8.0",
- "pydantic==2.11.7",
- "langchain==0.3.26",
- "langchain-community==0.3.27",
- "langchain-openai==0.3.28",
- "langgraph>=0.2.0",
- "langsmith>=0.1.0",
- "duckduckgo-search==8.1.1",
- "langmem>=0.0.29",
- "tiktoken==0.9.0",
- "ipfshttpclient==0.7.0",
- "pinata-python==1.0.0",
- "tweepy==4.16.0",
- "instructor==1.10.0",
- "google-api-python-client>=2.176.0",
- "apscheduler==3.10.4",
- "eth-typing==5.2.1",
- "eth-rpc-py==0.1.34",
- "fastapi==0.115.6",
- "uvicorn==0.32.1",
- "alembic==1.14.0",
- "typer==0.12.5",
- "eth-typeshed-py>=0.1.34",
- "pandas>=2.3.2",
- "numpy>=2.3.3",
- "numerize>=0.12",
-]
-
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[tool.hatch.build.targets.wheel]
-packages = ["src/talos", "src/crypto_sentiment"]
-
-[tool.hatch.metadata]
-allow-direct-references = true
-
-[project.scripts]
-talos = "talos.cli.main:app"
-
-[project.optional-dependencies]
-dev = [
- "ruff==0.12.4",
- "mypy==1.17.0",
- "pytest==8.4.1",
- "pytest-mock==3.14.1",
- "isort==5.12.0"
-]
-
-[tool.ruff]
-line-length = 120
-
-[tool.mypy]
-strict = true
-
-[tool.pytest.ini_options]
-testpaths = ["tests"]
-addopts = "--ignore=integration_tests"
-
-[tool.ruff.lint]
-ignore = []
diff --git a/rofl.yaml b/rofl.yaml
deleted file mode 100644
index f61031ba..00000000
--- a/rofl.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-name: talos
-version: 0.1.0
-repository: https://github.com/talos-agent/talos
-homepage: https://talos.is
-description: Talos is.
-tee: tdx
-kind: container
-resources:
- memory: 512
- cpus: 1
- storage:
- kind: disk-persistent
- size: 5120
-artifacts:
- builder: ghcr.io/oasisprotocol/rofl-dev:v0.2.0@sha256:a71fe850ba3f62076195e365be090fc5bf6256a3391fbd4e15b7c3fc0cce535c
- firmware: https://github.com/oasisprotocol/oasis-boot/releases/download/v0.6.2/ovmf.tdx.fd#db47100a7d6a0c1f6983be224137c3f8d7cb09b63bb1c7a5ee7829d8e994a42f
- kernel: https://github.com/oasisprotocol/oasis-boot/releases/download/v0.6.2/stage1.bin#e5d4d654ca1fa2c388bf64b23fc6e67815893fc7cb8b7cfee253d87963f54973
- stage2: https://github.com/oasisprotocol/oasis-boot/releases/download/v0.6.2/stage2-podman.tar.bz2#b2ea2a0ca769b6b2d64e3f0c577ee9c08f0bb81a6e33ed5b15b2a7e50ef9a09f
- container:
- runtime: https://github.com/oasisprotocol/oasis-sdk/releases/download/rofl-containers%2Fv0.7.3/rofl-containers#964fbd8edaea8041fd9c5304bb4631b7126d57d06062cc3922e50313cdeef618
- compose: docker-compose.rofl.yml
-deployments:
- mainnet:
- app_id: rofl1qpykfkl6ea78cyy67d35f7fmpk3pg36vashka4v9
- network: mainnet
- paratime: sapphire
- admin: oasis1qz2lty9v4glt5ts8ljhfpnd05dy3cwmtnyshws8q
- oci_repository: ghcr.io/talos-agent/talos:latest-orc
- trust_root:
- height: 26130891
- hash: 01c2dac4f9d636e9e159b1e56ebecbcdda8a9dccb452128a3954d0cf8b6e033e
- policy:
- quotes:
- pcs:
- tcb_validity_period: 30
- min_tcb_evaluation_data_number: 18
- tdx: {}
- enclaves:
- - id: jypB1qfYh2YpoXQbDglIxMxHA2wqOWpH68cLAhp0CBkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
- - id: v6N3N67EmLtKgCGuLia6+aw/ZtgB2ZxcfHQxu3Bn+c0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
- endorsements:
- - and:
- - provider: oasis1qzc8pldvm8vm3duvdrj63wgvkw34y9ucfcxzetqr
- - or:
- - provider_instance_admin: oasis1qpx5xq4kwtuawjdc0wty0d0mz08ha62jrc2c236j
- - provider_instance_admin: oasis1qz2lty9v4glt5ts8ljhfpnd05dy3cwmtnyshws8q
- fees: endorsing_node
- max_expiration: 3
- secrets:
- - name: OPENAI_API_KEY
- value: pGJwa1ggCl1w9SjAdDuo+xMXJKvefcJWA3SlsqqhSA57dJ4EKz1kbmFtZVgedNT8MJJ4I3ifwPjostUP2FMijDvi09YXHgiOBYerZW5vbmNlTx6x8KHbmS6ZXJbPpGUlcGV2YWx1ZVhwIhFMFiJuAc9I7Z6d5GnX6aO5H/RPbSKOgRzUnmUQvIMZKCLASiT4yAmR7PF+UbH8ojPZHnq7kErzLMjM72Sbo5jHWmNiBB+Xu85+h7jhjqy11NNYByT2AaPfyyyKWL3ldtB+zVHo3GJHBc9Gs4L9cg==
- machines:
- default:
- provider: oasis1qzc8pldvm8vm3duvdrj63wgvkw34y9ucfcxzetqr
- offer: large
- id: 000000000000001e
- permissions:
- log.view:
- - oasis1qpx5xq4kwtuawjdc0wty0d0mz08ha62jrc2c236j
- - oasis1qqugf308vwexacjfszhmk5lgqv9kwgf9ry5zevhx
- testnet:
- app_id: rofl1qz8c57nvrru0rdtv7242rzwv269a87zh6c8auqr3
- network: testnet
- paratime: sapphire
- admin: oasis1qqfrhj832yux84nn5k0u0hjaa7jps8rc0cj9rku5
- oci_repository: ghcr.io/kostko/talos:latest-orc
- trust_root:
- height: 27959263
- hash: 6c532bc656539449892330df3d78a110a6359013c985627ed5fafcef22e2b755
- policy:
- quotes:
- pcs:
- tcb_validity_period: 30
- min_tcb_evaluation_data_number: 18
- tdx: {}
- enclaves:
- - id: IbQkE9IQxcNkectzoHZQwk4wcNmRV0AYUulrk2gDkFUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
- - id: bvoQKpPTsKjFdrogy/cTOCpA4ftvgN97LPOZXVgOF3cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
- endorsements:
- - and:
- - provider: oasis1qrfeadn03ljm0kfx8wx0d5zf6kj79pxqvv0dukdm
- - or:
- - provider_instance_admin: oasis1qzeht65n0675qar7zg7yrdj9hcajwrrjwgzt0y8a
- - provider_instance_admin: oasis1qqfrhj832yux84nn5k0u0hjaa7jps8rc0cj9rku5
- fees: endorsing_node
- max_expiration: 3
- machines:
- default:
- provider: oasis1qrfeadn03ljm0kfx8wx0d5zf6kj79pxqvv0dukdm
- offer: test
- id: "0000000000000004"
- permissions:
- log.view:
- - oasis1qpx5xq4kwtuawjdc0wty0d0mz08ha62jrc2c236j
- - oasis1qqugf308vwexacjfszhmk5lgqv9kwgf9ry5zevhx
diff --git a/scripts/build_and_push_container_image.sh b/scripts/build_and_push_container_image.sh
deleted file mode 100755
index 65de8ff8..00000000
--- a/scripts/build_and_push_container_image.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-set -e
-
-COMPOSE_FILE=$(yq -r '.artifacts.container.compose' rofl.yaml)
-TARGET_IMAGE=$(yq -r '.services."talos-agent".image' ${COMPOSE_FILE} | cut -d '@' -f 1)
-
-BUILDER_NAME=buildkit_23
-BUILDER_IMAGE=moby/buildkit:v0.23.2
-SOURCE_DATE_EPOCH=1755248916
-
-# Ensure we use the right buildkit for reproducibility.
-if ! docker buildx inspect ${BUILDER_NAME} &>/dev/null; then
- docker buildx create --use --driver-opt image=${BUILDER_IMAGE} --name ${BUILDER_NAME}
-fi
-
-# Build the container in a reproducible way.
-METADATA_FILE=$(mktemp)
-
-docker buildx build \
- --builder ${BUILDER_NAME} \
- --no-cache \
- --provenance false \
- --build-arg SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH}" \
- --output type=registry,name=${TARGET_IMAGE},rewrite-timestamp=true \
- --metadata-file "${METADATA_FILE}" \
- .
-
-# Output the image digest.
-IMAGE_NAME=$(jq -r '."image.name" + "@" + ."containerimage.digest"' "${METADATA_FILE}")
-if [[ -n "${OUTPUT_IMAGE_NAME_PATH}" ]]; then
- echo "${IMAGE_NAME}" > ${OUTPUT_IMAGE_NAME_PATH}
-fi
-echo "${IMAGE_NAME}"
diff --git a/scripts/install_deps.sh b/scripts/install_deps.sh
deleted file mode 100755
index 0730359d..00000000
--- a/scripts/install_deps.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-uv pip install -r requirements.txt
diff --git a/scripts/propose_transactions/.gitignore b/scripts/propose_transactions/.gitignore
deleted file mode 100644
index 3c3629e6..00000000
--- a/scripts/propose_transactions/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-node_modules
diff --git a/scripts/propose_transactions/main.mjs b/scripts/propose_transactions/main.mjs
deleted file mode 100644
index a96260d8..00000000
--- a/scripts/propose_transactions/main.mjs
+++ /dev/null
@@ -1,162 +0,0 @@
-import SafeApiKit from "@safe-global/api-kit";
-import Safe from "@safe-global/protocol-kit";
-import { OperationType } from "@safe-global/types-kit";
-import * as yaml from "yaml";
-import * as oasis from "@oasisprotocol/client";
-import * as oasisRT from "@oasisprotocol/client-rt";
-import xhr2 from "xhr2";
-import { readFileSync } from "node:fs";
-global.XMLHttpRequest = xhr2;
-
-const DEPLOYMENT = process.env["DEPLOYMENT"];
-const PROPOSER_PRIVATE_KEY = process.env["PROPOSER_PRIVATE_KEY"];
-const SAFE_ADDRESS = process.env["SAFE_ADDRESS"];
-const APP_CONFIG_UPDATE_FILE = process.env["APP_CONFIG_UPDATE_FILE"];
-const DEPLOY_FILE = process.env["DEPLOY_FILE"];
-
-// Parse and load the ROFL app manifest.
-const roflAppManifest = yaml.parse(readFileSync("rofl.yaml", "utf8"));
-const roflAppDeployment = roflAppManifest.deployments[DEPLOYMENT];
-const roflAppId = roflAppDeployment.app_id;
-const roflMachine = roflAppDeployment.machines["default"];
-const roflAppConfigUpdate = oasis.misc.fromCBOR(
- readFileSync(APP_CONFIG_UPDATE_FILE),
-);
-const roflDeploy = oasis.misc.fromCBOR(readFileSync(DEPLOY_FILE));
-
-const networks = {
- // Sapphire Mainnet.
- mainnet: {
- runtimeId:
- "000000000000000000000000000000000000000000000000f80306c9858e7279",
- chainId: 23294n,
- grpcApi: "https://grpc.oasis.io",
- web3Api: "https://sapphire.oasis.io",
- safeApi: "https://transaction.safe.oasis.io/api",
- },
- // Sapphire Testnet.
- testnet: {
- runtimeId:
- "000000000000000000000000000000000000000000000000a6d1e3ebf60dff6c",
- chainId: 23295n,
- grpcApi: "https://testnet.grpc.oasis.io",
- web3Api: "https://testnet.sapphire.oasis.io",
- safeApi: "https://transaction-testnet.safe.oasis.io/api",
- },
-};
-const networkInfo = networks[roflAppDeployment.network];
-
-async function generateTransactions() {
- const sapphireRuntimeId = oasis.misc.fromHex(networkInfo.runtimeId);
- const nic = new oasis.client.NodeInternal(networkInfo.grpcApi);
-
- const roflmarket = new oasisRT.roflmarket.Wrapper(sapphireRuntimeId);
- const rofl = new oasisRT.rofl.Wrapper(sapphireRuntimeId);
-
- const app = await rofl
- .queryApp()
- .setArgs({ id: oasisRT.rofl.fromBech32(roflAppId) })
- .query(nic);
- console.log("Found app", app);
-
- const machine = await roflmarket
- .queryInstance()
- .setArgs({
- id: oasis.misc.fromHex(roflMachine.id),
- provider: oasis.staking.addressFromBech32(roflMachine.provider),
- })
- .query(nic);
- console.log("Found machine", machine);
-
- if (!machine.deployment?.app_id) {
- throw new Error(
- `Machine ${roflMachine.id} isn't running any app. Expected ${roflAppId}`,
- );
- }
- if (oasisRT.rofl.toBech32(machine.deployment.app_id) !== roflAppId) {
- throw new Error(
- `Machine ${roflMachine.id} is running app ${oasisRT.rofl.toBech32(machine.deployment.app_id)}. Expected ${roflAppId}`,
- );
- }
-
- const txUpdateEnclaves = rofl
- .callUpdate()
- .setBody(roflAppConfigUpdate.call.body)
- .toSubcall();
-
- const txUpdateMachine = roflmarket
- .callInstanceExecuteCmds()
- .setBody(roflDeploy.call.body)
- .toSubcall();
-
- const transactions = [txUpdateEnclaves, txUpdateMachine];
- console.log("Transactions to propose", transactions);
- return transactions;
-}
-
-const safeClient = new SafeApiKit({
- chainId: networkInfo.chainId,
- txServiceUrl: networkInfo.safeApi,
-});
-
-const safeProposer = await Safe.init({
- provider: networkInfo.web3Api,
- // Generate a random ethereum private key, save it into github secrets
- // https://github.com/talos-agent/talos/settings/secrets/actions
- // and add its address as proposer to oasis safe
- // https://safe.oasis.io/settings/setup?safe=sapphire-testnet:0x4b5ca97d1F45a8b589c0C161ebB258D50F756468
- signer: PROPOSER_PRIVATE_KEY,
- safeAddress: SAFE_ADDRESS,
-});
-
-const safeTransaction = await safeProposer.createTransaction({
- transactions: (await generateTransactions()).map((tx) => ({
- ...tx,
- value: tx.value ? tx.value.toString() : "0",
- operation: OperationType.Call,
- })),
-});
-
-const safeTxHash = await safeProposer.getTransactionHash(safeTransaction);
-const signature = await safeProposer.signHash(safeTxHash);
-
-// Retry logic for proposeTransaction
-let retryCount = 0;
-const maxRetries = 2;
-
-while (retryCount <= maxRetries) {
- try {
- console.log(
- `Proposing transaction - Attempt ${retryCount + 1}/${maxRetries + 1}`,
- );
-
- await safeClient.proposeTransaction({
- safeAddress: await safeProposer.getAddress(),
- safeTransactionData: safeTransaction.data,
- safeTxHash,
- senderAddress: signature.signer,
- senderSignature: signature.data,
- });
-
- console.log("Proposed transaction hash", safeTxHash);
- break; // Success, exit retry loop
- } catch (error) {
- retryCount++;
- console.error(
- `Transaction proposal failed on attempt ${retryCount}:`,
- error.message,
- );
-
- if (retryCount > maxRetries) {
- console.error(
- `Transaction proposal failed after ${maxRetries + 1} attempts`,
- );
- throw error;
- }
-
- // Wait before retry (1s, 2s)
- const delay = 1000 * retryCount;
- console.log(`Retrying in ${delay}ms...`);
- await new Promise((resolve) => setTimeout(resolve, delay));
- }
-}
diff --git a/scripts/propose_transactions/package.json b/scripts/propose_transactions/package.json
deleted file mode 100644
index a5de9cf0..00000000
--- a/scripts/propose_transactions/package.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "dependencies": {
- "@oasisprotocol/client": "^1.3.0",
- "@oasisprotocol/client-rt": "^1.3.0",
- "@safe-global/api-kit": "^4.0.0",
- "@safe-global/protocol-kit": "^6.1.0",
- "@safe-global/types-kit": "^3.0.0",
- "xhr2": "^0.2.1",
- "yaml": "^2.8.1"
- }
-}
diff --git a/scripts/propose_transactions/yarn.lock b/scripts/propose_transactions/yarn.lock
deleted file mode 100644
index 4cd75149..00000000
--- a/scripts/propose_transactions/yarn.lock
+++ /dev/null
@@ -1,366 +0,0 @@
-# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
-# yarn lockfile v1
-
-
-"@adraffy/ens-normalize@^1.11.0":
- version "1.11.0"
- resolved "https://registry.yarnpkg.com/@adraffy/ens-normalize/-/ens-normalize-1.11.0.tgz#42cc67c5baa407ac25059fcd7d405cc5ecdb0c33"
- integrity sha512-/3DDPKHqqIqxUULp8yP4zODUY1i+2xvVWsv8A79xGWdCAG+8sb0hRh0Rk2QyOJUnnbyPUAZYcpBuRe3nS2OIUg==
-
-"@noble/ciphers@^1.3.0":
- version "1.3.0"
- resolved "https://registry.yarnpkg.com/@noble/ciphers/-/ciphers-1.3.0.tgz#f64b8ff886c240e644e5573c097f86e5b43676dc"
- integrity sha512-2I0gnIVPtfnMw9ee9h1dJG7tp81+8Ob3OJb3Mv37rx5L40/b0i7djjCVvGOVqc9AEIQyvyu1i6ypKdFw8R8gQw==
-
-"@noble/curves@1.9.6":
- version "1.9.6"
- resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.9.6.tgz#b45ebedca85bb75782f6be7e7f120f0c423c99e0"
- integrity sha512-GIKz/j99FRthB8icyJQA51E8Uk5hXmdyThjgQXRKiv9h0zeRlzSCLIzFw6K1LotZ3XuB7yzlf76qk7uBmTdFqA==
- dependencies:
- "@noble/hashes" "1.8.0"
-
-"@noble/curves@^1.6.0", "@noble/curves@^1.9.1", "@noble/curves@~1.9.0":
- version "1.9.7"
- resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.9.7.tgz#79d04b4758a43e4bca2cbdc62e7771352fa6b951"
- integrity sha512-gbKGcRUYIjA3/zCCNaWDciTMFI0dCkvou3TL8Zmy5Nc7sJ47a0jtOeZoTaMxkuqRo9cRhjOdZJXegxYE5FN/xw==
- dependencies:
- "@noble/hashes" "1.8.0"
-
-"@noble/hashes@1.8.0", "@noble/hashes@^1.2.0", "@noble/hashes@^1.5.0", "@noble/hashes@^1.8.0", "@noble/hashes@~1.8.0":
- version "1.8.0"
- resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.8.0.tgz#cee43d801fcef9644b11b8194857695acd5f815a"
- integrity sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==
-
-"@oasisprotocol/client-rt@^1.3.0":
- version "1.3.0"
- resolved "https://registry.yarnpkg.com/@oasisprotocol/client-rt/-/client-rt-1.3.0.tgz#28923efd5edc04bb8ddbb28c506c029e1b0b0ea5"
- integrity sha512-YMD0kg0LFk2P0d3VhNRN7OPuX7f5UwsoDhujRa52Q2OchufPrmG9Joss/LSXvPmG0r2pr8OY02mroWa2C6V4PA==
- dependencies:
- "@noble/curves" "^1.6.0"
- "@noble/hashes" "^1.5.0"
- "@oasisprotocol/client" "^1.3.0"
- "@oasisprotocol/deoxysii" "^0.0.6"
- tweetnacl "^1.0.3"
- viem "^2.31.3"
-
-"@oasisprotocol/client@^1.3.0":
- version "1.3.0"
- resolved "https://registry.yarnpkg.com/@oasisprotocol/client/-/client-1.3.0.tgz#1bc5849400a692ba33c9ab7de376c34342e97a57"
- integrity sha512-eKavJ97/GmdwtFVZ0OqMj5CB/zOHkDJPUZx+mjUItUMurTFc3la0PMeRPkfeDmct9+2kvP+3HsR74JiOq8uW4w==
- dependencies:
- "@noble/hashes" "^1.5.0"
- bech32 "^2.0.0"
- bip39 "^3.1.0"
- cborg "^2.0.3"
- grpc-web "^1.5.0"
- protobufjs "~7.4.0"
- tweetnacl "^1.0.3"
-
-"@oasisprotocol/deoxysii@^0.0.6":
- version "0.0.6"
- resolved "https://registry.yarnpkg.com/@oasisprotocol/deoxysii/-/deoxysii-0.0.6.tgz#c99d5566930653e903ab96737329acfa8402ed37"
- integrity sha512-TI51bIpChfsla9aRbjip6zvTbz6rpsqKgM7MqJvSfeFF6G5xLXQcbSC9u/1hOnOOazd7HaqA9NvaXQdeKCb3yw==
-
-"@peculiar/asn1-schema@^2.3.13":
- version "2.4.0"
- resolved "https://registry.yarnpkg.com/@peculiar/asn1-schema/-/asn1-schema-2.4.0.tgz#e3aa7917d433b4c3fcfa1fcb57eac233b1c38787"
- integrity sha512-umbembjIWOrPSOzEGG5vxFLkeM8kzIhLkgigtsOrfLKnuzxWxejAcUX+q/SoZCdemlODOcr5WiYa7+dIEzBXZQ==
- dependencies:
- asn1js "^3.0.6"
- pvtsutils "^1.3.6"
- tslib "^2.8.1"
-
-"@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2":
- version "1.1.2"
- resolved "https://registry.yarnpkg.com/@protobufjs/aspromise/-/aspromise-1.1.2.tgz#9b8b0cc663d669a7d8f6f5d0893a14d348f30fbf"
- integrity sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==
-
-"@protobufjs/base64@^1.1.2":
- version "1.1.2"
- resolved "https://registry.yarnpkg.com/@protobufjs/base64/-/base64-1.1.2.tgz#4c85730e59b9a1f1f349047dbf24296034bb2735"
- integrity sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==
-
-"@protobufjs/codegen@^2.0.4":
- version "2.0.4"
- resolved "https://registry.yarnpkg.com/@protobufjs/codegen/-/codegen-2.0.4.tgz#7ef37f0d010fb028ad1ad59722e506d9262815cb"
- integrity sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==
-
-"@protobufjs/eventemitter@^1.1.0":
- version "1.1.0"
- resolved "https://registry.yarnpkg.com/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz#355cbc98bafad5978f9ed095f397621f1d066b70"
- integrity sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==
-
-"@protobufjs/fetch@^1.1.0":
- version "1.1.0"
- resolved "https://registry.yarnpkg.com/@protobufjs/fetch/-/fetch-1.1.0.tgz#ba99fb598614af65700c1619ff06d454b0d84c45"
- integrity sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==
- dependencies:
- "@protobufjs/aspromise" "^1.1.1"
- "@protobufjs/inquire" "^1.1.0"
-
-"@protobufjs/float@^1.0.2":
- version "1.0.2"
- resolved "https://registry.yarnpkg.com/@protobufjs/float/-/float-1.0.2.tgz#5e9e1abdcb73fc0a7cb8b291df78c8cbd97b87d1"
- integrity sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==
-
-"@protobufjs/inquire@^1.1.0":
- version "1.1.0"
- resolved "https://registry.yarnpkg.com/@protobufjs/inquire/-/inquire-1.1.0.tgz#ff200e3e7cf2429e2dcafc1140828e8cc638f089"
- integrity sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==
-
-"@protobufjs/path@^1.1.2":
- version "1.1.2"
- resolved "https://registry.yarnpkg.com/@protobufjs/path/-/path-1.1.2.tgz#6cc2b20c5c9ad6ad0dccfd21ca7673d8d7fbf68d"
- integrity sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==
-
-"@protobufjs/pool@^1.1.0":
- version "1.1.0"
- resolved "https://registry.yarnpkg.com/@protobufjs/pool/-/pool-1.1.0.tgz#09fd15f2d6d3abfa9b65bc366506d6ad7846ff54"
- integrity sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==
-
-"@protobufjs/utf8@^1.1.0":
- version "1.1.0"
- resolved "https://registry.yarnpkg.com/@protobufjs/utf8/-/utf8-1.1.0.tgz#a777360b5b39a1a2e5106f8e858f2fd2d060c570"
- integrity sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==
-
-"@safe-global/api-kit@^4.0.0":
- version "4.0.0"
- resolved "https://registry.yarnpkg.com/@safe-global/api-kit/-/api-kit-4.0.0.tgz#7c686258041bc0db5c5eac6a8f33862d679e7b85"
- integrity sha512-xtLLi6OXguLw8cLoYnzCxqmirzRK4sSORxaiBDXdxJfBXIZLLKvYwQyDjsPL+2W4jKlJVcSLCw5EfolJahNMYg==
- dependencies:
- "@safe-global/protocol-kit" "^6.1.0"
- "@safe-global/types-kit" "^3.0.0"
- node-fetch "^2.7.0"
- viem "^2.21.8"
-
-"@safe-global/protocol-kit@^6.1.0":
- version "6.1.0"
- resolved "https://registry.yarnpkg.com/@safe-global/protocol-kit/-/protocol-kit-6.1.0.tgz#0c987b08acb274da08a7b04ee06277e40b5d3fa8"
- integrity sha512-2f8jH6SLeNGZB6HnvU8aDV4L4HLOelwW042yGg/s6sZAJEvh7I+yejmIbsK8o02+fbXgdssNdqTD4I90erBiZQ==
- dependencies:
- "@safe-global/safe-deployments" "^1.37.35"
- "@safe-global/safe-modules-deployments" "^2.2.10"
- "@safe-global/types-kit" "^3.0.0"
- abitype "^1.0.2"
- semver "^7.7.1"
- viem "^2.21.8"
- optionalDependencies:
- "@noble/curves" "^1.6.0"
- "@peculiar/asn1-schema" "^2.3.13"
-
-"@safe-global/safe-deployments@^1.37.35":
- version "1.37.40"
- resolved "https://registry.yarnpkg.com/@safe-global/safe-deployments/-/safe-deployments-1.37.40.tgz#42b1f991a6f8fc74bcc8054b3a98eb5ba32205ab"
- integrity sha512-jLvoFYPmCR75SNOLzLYYMD8qxYsiozDR3/hJXxv54aslmZKcirOKdZOSxQfhT9g7ga/q/v47lzG10x72cWJtzw==
- dependencies:
- semver "^7.6.2"
-
-"@safe-global/safe-modules-deployments@^2.2.10":
- version "2.2.13"
- resolved "https://registry.yarnpkg.com/@safe-global/safe-modules-deployments/-/safe-modules-deployments-2.2.13.tgz#90690cf30f28e388695e092b32d03f73719b1bc1"
- integrity sha512-vGEbRw1pL9wzvOrvN4g8r1SyD2lx2nqHr5pp1Y4pcXPA/BCJEo3z5DR9fx0PFk0dEgznB7QaaPUhnHae8vdPxw==
-
-"@safe-global/types-kit@^3.0.0":
- version "3.0.0"
- resolved "https://registry.yarnpkg.com/@safe-global/types-kit/-/types-kit-3.0.0.tgz#b35826af0e417fa02a540b874c109b5ddb5ed086"
- integrity sha512-AZWIlR5MguDPdGiOj7BB4JQPY2afqmWQww1mu8m8Oi16HHBW99G01kFOu4NEHBwEU1cgwWOMY19hsI5KyL4W2w==
- dependencies:
- abitype "^1.0.2"
-
-"@scure/base@~1.2.5":
- version "1.2.6"
- resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.2.6.tgz#ca917184b8231394dd8847509c67a0be522e59f6"
- integrity sha512-g/nm5FgUa//MCj1gV09zTJTaM6KBAHqLN907YVQqf7zC49+DcO4B1so4ZX07Ef10Twr6nuqYEH9GEggFXA4Fmg==
-
-"@scure/bip32@1.7.0", "@scure/bip32@^1.7.0":
- version "1.7.0"
- resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.7.0.tgz#b8683bab172369f988f1589640e53c4606984219"
- integrity sha512-E4FFX/N3f4B80AKWp5dP6ow+flD1LQZo/w8UnLGYZO674jS6YnYeepycOOksv+vLPSpgN35wgKgy+ybfTb2SMw==
- dependencies:
- "@noble/curves" "~1.9.0"
- "@noble/hashes" "~1.8.0"
- "@scure/base" "~1.2.5"
-
-"@scure/bip39@1.6.0", "@scure/bip39@^1.6.0":
- version "1.6.0"
- resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.6.0.tgz#475970ace440d7be87a6086cbee77cb8f1a684f9"
- integrity sha512-+lF0BbLiJNwVlev4eKelw1WWLaiKXw7sSl8T6FvBlWkdX+94aGJ4o8XjUdlyhTCjd8c+B3KT3JfS8P0bLRNU6A==
- dependencies:
- "@noble/hashes" "~1.8.0"
- "@scure/base" "~1.2.5"
-
-"@types/node@>=13.7.0":
- version "24.3.0"
- resolved "https://registry.yarnpkg.com/@types/node/-/node-24.3.0.tgz#89b09f45cb9a8ee69466f18ee5864e4c3eb84dec"
- integrity sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow==
- dependencies:
- undici-types "~7.10.0"
-
-abitype@1.0.8, abitype@^1.0.2, abitype@^1.0.8:
- version "1.0.8"
- resolved "https://registry.yarnpkg.com/abitype/-/abitype-1.0.8.tgz#3554f28b2e9d6e9f35eb59878193eabd1b9f46ba"
- integrity sha512-ZeiI6h3GnW06uYDLx0etQtX/p8E24UaHHBj57RSjK7YBFe7iuVn07EDpOeP451D06sF27VOz9JJPlIKJmXgkEg==
-
-asn1js@^3.0.6:
- version "3.0.6"
- resolved "https://registry.yarnpkg.com/asn1js/-/asn1js-3.0.6.tgz#53e002ebe00c5f7fd77c1c047c3557d7c04dce25"
- integrity sha512-UOCGPYbl0tv8+006qks/dTgV9ajs97X2p0FAbyS2iyCRrmLSRolDaHdp+v/CLgnzHc3fVB+CwYiUmei7ndFcgA==
- dependencies:
- pvtsutils "^1.3.6"
- pvutils "^1.1.3"
- tslib "^2.8.1"
-
-bech32@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/bech32/-/bech32-2.0.0.tgz#078d3686535075c8c79709f054b1b226a133b355"
- integrity sha512-LcknSilhIGatDAsY1ak2I8VtGaHNhgMSYVxFrGLXv+xLHytaKZKcaUJJUE7qmBr7h33o5YQwP55pMI0xmkpJwg==
-
-bip39@^3.1.0:
- version "3.1.0"
- resolved "https://registry.yarnpkg.com/bip39/-/bip39-3.1.0.tgz#c55a418deaf48826a6ceb34ac55b3ee1577e18a3"
- integrity sha512-c9kiwdk45Do5GL0vJMe7tS95VjCii65mYAH7DfWl3uW8AVzXKQVUm64i3hzVybBDMp9r7j9iNxR85+ul8MdN/A==
- dependencies:
- "@noble/hashes" "^1.2.0"
-
-cborg@^2.0.3:
- version "2.0.5"
- resolved "https://registry.yarnpkg.com/cborg/-/cborg-2.0.5.tgz#b5393c8b1843d5c1a61f2b79b4c9f752052a4d44"
- integrity sha512-xVW1rSIw1ZXbkwl2XhJ7o/jAv0vnVoQv/QlfQxV8a7V5PlA4UU/AcIiXqmpyybwNWy/GPQU1m/aBVNIWr7/T0w==
-
-eventemitter3@5.0.1:
- version "5.0.1"
- resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-5.0.1.tgz#53f5ffd0a492ac800721bb42c66b841de96423c4"
- integrity sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==
-
-grpc-web@^1.5.0:
- version "1.5.0"
- resolved "https://registry.yarnpkg.com/grpc-web/-/grpc-web-1.5.0.tgz#154e4007ab59a94bf7726b87ef6c5bd8815ecf6e"
- integrity sha512-y1tS3BBIoiVSzKTDF3Hm7E8hV2n7YY7pO0Uo7depfWJqKzWE+SKr0jvHNIJsJJYILQlpYShpi/DRJJMbosgDMQ==
-
-isows@1.0.7:
- version "1.0.7"
- resolved "https://registry.yarnpkg.com/isows/-/isows-1.0.7.tgz#1c06400b7eed216fbba3bcbd68f12490fc342915"
- integrity sha512-I1fSfDCZL5P0v33sVqeTDSpcstAg/N+wF5HS033mogOVIp4B+oHC7oOCsA3axAbBSGTJ8QubbNmnIRN/h8U7hg==
-
-long@^5.0.0:
- version "5.3.2"
- resolved "https://registry.yarnpkg.com/long/-/long-5.3.2.tgz#1d84463095999262d7d7b7f8bfd4a8cc55167f83"
- integrity sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==
-
-node-fetch@^2.7.0:
- version "2.7.0"
- resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d"
- integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==
- dependencies:
- whatwg-url "^5.0.0"
-
-ox@0.8.7:
- version "0.8.7"
- resolved "https://registry.yarnpkg.com/ox/-/ox-0.8.7.tgz#234812627f931aaf5bd45728a50c9a42a26e76db"
- integrity sha512-W1f0FiMf9NZqtHPEDEAEkyzZDwbIKfmH2qmQx8NNiQ/9JhxrSblmtLJsSfTtQG5YKowLOnBlLVguCyxm/7ztxw==
- dependencies:
- "@adraffy/ens-normalize" "^1.11.0"
- "@noble/ciphers" "^1.3.0"
- "@noble/curves" "^1.9.1"
- "@noble/hashes" "^1.8.0"
- "@scure/bip32" "^1.7.0"
- "@scure/bip39" "^1.6.0"
- abitype "^1.0.8"
- eventemitter3 "5.0.1"
-
-protobufjs@~7.4.0:
- version "7.4.0"
- resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.4.0.tgz#7efe324ce9b3b61c82aae5de810d287bc08a248a"
- integrity sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==
- dependencies:
- "@protobufjs/aspromise" "^1.1.2"
- "@protobufjs/base64" "^1.1.2"
- "@protobufjs/codegen" "^2.0.4"
- "@protobufjs/eventemitter" "^1.1.0"
- "@protobufjs/fetch" "^1.1.0"
- "@protobufjs/float" "^1.0.2"
- "@protobufjs/inquire" "^1.1.0"
- "@protobufjs/path" "^1.1.2"
- "@protobufjs/pool" "^1.1.0"
- "@protobufjs/utf8" "^1.1.0"
- "@types/node" ">=13.7.0"
- long "^5.0.0"
-
-pvtsutils@^1.3.6:
- version "1.3.6"
- resolved "https://registry.yarnpkg.com/pvtsutils/-/pvtsutils-1.3.6.tgz#ec46e34db7422b9e4fdc5490578c1883657d6001"
- integrity sha512-PLgQXQ6H2FWCaeRak8vvk1GW462lMxB5s3Jm673N82zI4vqtVUPuZdffdZbPDFRoU8kAhItWFtPCWiPpp4/EDg==
- dependencies:
- tslib "^2.8.1"
-
-pvutils@^1.1.3:
- version "1.1.3"
- resolved "https://registry.yarnpkg.com/pvutils/-/pvutils-1.1.3.tgz#f35fc1d27e7cd3dfbd39c0826d173e806a03f5a3"
- integrity sha512-pMpnA0qRdFp32b1sJl1wOJNxZLQ2cbQx+k6tjNtZ8CpvVhNqEPRgivZ2WOUev2YMajecdH7ctUPDvEe87nariQ==
-
-semver@^7.6.2, semver@^7.7.1:
- version "7.7.2"
- resolved "https://registry.yarnpkg.com/semver/-/semver-7.7.2.tgz#67d99fdcd35cec21e6f8b87a7fd515a33f982b58"
- integrity sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==
-
-tr46@~0.0.3:
- version "0.0.3"
- resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a"
- integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==
-
-tslib@^2.8.1:
- version "2.8.1"
- resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f"
- integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==
-
-tweetnacl@^1.0.3:
- version "1.0.3"
- resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-1.0.3.tgz#ac0af71680458d8a6378d0d0d050ab1407d35596"
- integrity sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==
-
-undici-types@~7.10.0:
- version "7.10.0"
- resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.10.0.tgz#4ac2e058ce56b462b056e629cc6a02393d3ff350"
- integrity sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag==
-
-viem@^2.21.8, viem@^2.31.3:
- version "2.34.0"
- resolved "https://registry.yarnpkg.com/viem/-/viem-2.34.0.tgz#566b15838d3121d03eaa9bdb2b8bf9b86c37d152"
- integrity sha512-HJZG9Wt0DLX042MG0PK17tpataxtdAEhpta9/Q44FqKwy3xZMI5Lx4jF+zZPuXFuYjZ68R0PXqRwlswHs6r4gA==
- dependencies:
- "@noble/curves" "1.9.6"
- "@noble/hashes" "1.8.0"
- "@scure/bip32" "1.7.0"
- "@scure/bip39" "1.6.0"
- abitype "1.0.8"
- isows "1.0.7"
- ox "0.8.7"
- ws "8.18.3"
-
-webidl-conversions@^3.0.0:
- version "3.0.1"
- resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"
- integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==
-
-whatwg-url@^5.0.0:
- version "5.0.0"
- resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d"
- integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==
- dependencies:
- tr46 "~0.0.3"
- webidl-conversions "^3.0.0"
-
-ws@8.18.3:
- version "8.18.3"
- resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.3.tgz#b56b88abffde62791c639170400c93dcb0c95472"
- integrity sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==
-
-xhr2@^0.2.1:
- version "0.2.1"
- resolved "https://registry.yarnpkg.com/xhr2/-/xhr2-0.2.1.tgz#4e73adc4f9cfec9cbd2157f73efdce3a5f108a93"
- integrity sha512-sID0rrVCqkVNUn8t6xuv9+6FViXjUVXq8H5rWOH2rz9fDNQEd4g0EA2XlcEdJXRz5BMEn4O1pJFdT+z4YHhoWw==
-
-yaml@^2.8.1:
- version "2.8.1"
- resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.8.1.tgz#1870aa02b631f7e8328b93f8bc574fac5d6c4d79"
- integrity sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==
diff --git a/scripts/run_checks.sh b/scripts/run_checks.sh
deleted file mode 100755
index e5c9addd..00000000
--- a/scripts/run_checks.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# This script runs ruff, mypy, and pytest, similar to the CI.
-
-# Exit immediately if a command exits with a non-zero status.
-set -e
-
-# Install dependencies if the virtual environment doesn't exist
-if [ ! -d ".venv" ]; then
- echo "Creating virtual environment..."
- python3 -m venv .venv
- source .venv/bin/activate
- pip install "uv==0.2.22"
- uv pip install -e .[dev]
-else
- source .venv/bin/activate
-fi
-
-# Run isort
-echo "Running isort..."
-uv run isort .
-
-# Run ruff
-echo "Running ruff..."
-uv run ruff format .
-uv run ruff check .
-
-# Run mypy
-echo "Running mypy..."
-uv run mypy src
-
-# Run pytest
-echo "Running pytest..."
-uv run pytest
diff --git a/scripts/start_server.sh b/scripts/start_server.sh
deleted file mode 100755
index b2abcae0..00000000
--- a/scripts/start_server.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-# Startup script for Talos server with database migrations
-
-set -e
-
-echo "Starting Talos server..."
-
-# Run database migrations
-echo "Running database migrations..."
-python -m talos migrations upgrade
-
-# Start the FastAPI server
-echo "Starting FastAPI server..."
-python -m talos.cli.server
diff --git a/scripts/test_container.sh b/scripts/test_container.sh
deleted file mode 100755
index a12852e7..00000000
--- a/scripts/test_container.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-# Test script for Talos Docker container
-set -e
-
-# Configuration
-TALOS_PORT=${TALOS_PORT:-8080}
-
-echo "Testing Talos Docker container on port $TALOS_PORT..."
-
-# Check if container is running
-if ! docker ps | grep -q talos-agent; then
- echo "❌ Talos container is not running"
- echo "Start it with: docker-compose up -d"
- exit 1
-fi
-
-echo "✅ Container is running"
-
-# Test health endpoint
-echo "Testing health endpoint..."
-if curl -f http://localhost:$TALOS_PORT/health > /dev/null 2>&1; then
- echo "✅ Health endpoint responding"
-else
- echo "❌ Health endpoint not responding"
- exit 1
-fi
-
-# Test API documentation
-echo "Testing API documentation..."
-if curl -f http://localhost:$TALOS_PORT/docs > /dev/null 2>&1; then
- echo "✅ API documentation accessible"
-else
- echo "❌ API documentation not accessible"
- exit 1
-fi
-
-# Test root endpoint
-echo "Testing root endpoint..."
-if curl -f http://localhost:$TALOS_PORT/ > /dev/null 2>&1; then
- echo "✅ Root endpoint responding"
-else
- echo "❌ Root endpoint not responding"
- exit 1
-fi
-
-echo "🎉 All tests passed! Container is working correctly."
diff --git a/scripts/verify_container_image.sh b/scripts/verify_container_image.sh
deleted file mode 100755
index 4dcdd5a1..00000000
--- a/scripts/verify_container_image.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -e
-
-COMPOSE_FILE=$(yq -r '.artifacts.container.compose' rofl.yaml)
-TALOS_AGENT_IMAGE=$(yq -r '.services."talos-agent".image' ${COMPOSE_FILE})
-
-if [[ "${TALOS_AGENT_IMAGE}" != "${EXPECTED_TALOS_AGENT_IMAGE}" ]]; then
- echo "Talos agent image mismatch:"
- echo ""
- echo " Configured in ${COMPOSE_FILE}:"
- echo " ${TALOS_AGENT_IMAGE}"
- echo ""
- echo " Built locally:"
- echo " ${EXPECTED_TALOS_AGENT_IMAGE}"
- echo ""
- exit 1
-fi
diff --git a/src/talos/__init__.py b/src/talos/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/cli/README.md b/src/talos/cli/README.md
deleted file mode 100644
index 54d9d5dd..00000000
--- a/src/talos/cli/README.md
+++ /dev/null
@@ -1,210 +0,0 @@
-# Talos CLI
-
-The Talos CLI is the main entry point for interacting with the Talos agent.
-
-## Installation
-
-The CLI is installed as part of the `talos` package.
-
-## Usage
-
-The CLI can be run in multiple modes: interactive, non-interactive, and daemon mode.
-
-### Interactive Mode
-
-To enter interactive mode, run `talos` without any arguments:
-
-```bash
-uv run talos
->> your query
-```
-
-### Non-Interactive Mode
-
-In non-interactive mode, you can run a single query and the agent will exit:
-
-```bash
-uv run talos "your query"
-```
-
-### Daemon Mode
-
-Run the agent continuously for scheduled operations:
-
-```bash
-uv run talos daemon
-```
-
-## Commands
-
-The Talos CLI has the following commands and subcommands:
-
-### `twitter`
-
-Twitter-related operations and analysis.
-
-#### `get-user-prompt `
-
-Gets the general voice of a user as a structured persona analysis.
-
-```bash
-uv run talos twitter get-user-prompt
-```
-
-#### `get-query-sentiment [--start-time]`
-
-Gets the general sentiment/report on a specific query.
-
-```bash
-uv run talos twitter get-query-sentiment
-uv run talos twitter get-query-sentiment --start-time "2023-01-01T00:00:00Z"
-```
-
-#### `integrate-voice [--username]`
-
-Integrate Twitter voice analysis into agent communication.
-
-```bash
-uv run talos twitter integrate-voice
-uv run talos twitter integrate-voice --username talos_is
-```
-
-### `github`
-
-GitHub repository management and PR reviews. See [GitHub CLI Commands](../CLI_GITHUB_COMMANDS.md) for detailed documentation.
-
-### `proposals`
-
-Governance proposal evaluation.
-
-#### `eval --file `
-
-Evaluates a proposal from a file.
-
-```bash
-uv run talos proposals eval --file proposal.txt
-```
-
-### `memory`
-
-Memory management and search operations.
-
-#### `list [--user-id] [--filter-user] [--use-database] [--verbose]`
-
-List all memories with optional user filtering.
-
-```bash
-uv run talos memory list
-uv run talos memory list --user-id user123 --verbose
-```
-
-#### `search [--user-id] [--limit] [--use-database]`
-
-Search memories using semantic similarity.
-
-```bash
-uv run talos memory search "governance proposal"
-uv run talos memory search "twitter sentiment" --limit 10
-```
-
-#### `flush [--user-id] [--use-database]`
-
-Flush unsaved memories to disk or delete user memories.
-
-```bash
-uv run talos memory flush --user-id user123
-```
-
-### `arbiscan`
-
-Arbitrum blockchain contract source code retrieval.
-
-#### `get-source-code [--api-key] [--chain-id] [--format]`
-
-Gets the source code of a verified smart contract from Arbiscan.
-
-```bash
-uv run talos arbiscan get-source-code 0x1234...
-uv run talos arbiscan get-source-code 0x1234... --format json
-uv run talos arbiscan get-source-code 0x1234... --chain-id 42170
-```
-
-### Core Commands
-
-#### `generate-keys [--key-dir]`
-
-Generates a new RSA key pair.
-
-```bash
-uv run talos generate-keys
-uv run talos generate-keys --key-dir /path/to/keys
-```
-
-#### `get-public-key [--key-dir]`
-
-Gets the public key.
-
-```bash
-uv run talos get-public-key
-```
-
-#### `encrypt `
-
-Encrypts a message.
-
-```bash
-uv run talos encrypt "secret message" public_key.pem
-```
-
-#### `decrypt [--key-dir]`
-
-Decrypts a message.
-
-```bash
-uv run talos decrypt
-```
-
-#### `daemon [--prompts-dir] [--model-name] [--temperature]`
-
-Run the Talos agent in daemon mode for continuous operation.
-
-```bash
-uv run talos daemon
-uv run talos daemon --model-name gpt-5 --temperature 0.1
-```
-
-#### `cleanup-users [--older-than] [--dry-run]`
-
-Clean up temporary users and their conversation data.
-
-```bash
-uv run talos cleanup-users --older-than 24 --dry-run
-uv run talos cleanup-users --older-than 48
-```
-
-#### `db-stats`
-
-Show database statistics.
-
-```bash
-uv run talos db-stats
-```
-
-## Global Options
-
-- `--verbose, -v`: Enable verbose output
-- `--user-id, -u`: User identifier for conversation tracking
-- `--use-database`: Use database for conversation storage instead of files
-- `--help`: Show help information
-
-## Environment Variables
-
-### Required
-- `OPENAI_API_KEY`: OpenAI API key for AI functionality
-- `PINATA_API_KEY`: Pinata API key for IPFS operations
-- `PINATA_SECRET_API_KEY`: Pinata secret API key for IPFS operations
-
-### Optional
-- `GITHUB_API_TOKEN`: GitHub API token for repository operations
-- `TWITTER_BEARER_TOKEN`: Twitter Bearer token for social media analysis
-- `ARBISCAN_API_KEY`: Arbiscan API key for higher rate limits
diff --git a/src/talos/cli/__init__.py b/src/talos/cli/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/cli/arbiscan.py b/src/talos/cli/arbiscan.py
deleted file mode 100644
index 1678a2c6..00000000
--- a/src/talos/cli/arbiscan.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from typing import Optional
-import os
-import typer
-import json
-
-from talos.utils.arbiscan import get_contract_source_code
-
-arbiscan_app = typer.Typer()
-
-
-@arbiscan_app.command("get-source-code")
-def get_source_code(
- contract_address: str = typer.Argument(..., help="The contract address to get source code for"),
- api_key: Optional[str] = typer.Option(None, "--api-key", "-k", help="Optional API key for higher rate limits"),
- chain_id: int = typer.Option(42161, "--chain-id", "-c", help="Chain ID (42161 for Arbitrum One, 42170 for Nova, 421614 for Sepolia)"),
- output_format: str = typer.Option("formatted", "--format", "-f", help="Output format: 'formatted', 'json', or 'source-only'"),
-):
- """
- Gets the source code of a verified smart contract from Arbiscan.
- """
- try:
- api_key = api_key or os.getenv("ARBISCAN_API_KEY")
- contract_data = get_contract_source_code(
- contract_address=contract_address,
- api_key=api_key,
- chain_id=chain_id
- )
-
- if output_format == "json":
- print(json.dumps(contract_data.model_dump(), indent=2))
- elif output_format == "source-only":
- print(contract_data.source_code)
- else:
- print(f"=== Contract Source Code for {contract_address} ===\n")
- print(f"Contract Name: {contract_data.contract_name}")
- print(f"Compiler Version: {contract_data.compiler_version}")
- print(f"Optimization Used: {contract_data.optimization_used}")
- if contract_data.optimization_used == "1":
- print(f"Optimization Runs: {contract_data.runs}")
- print(f"License Type: {contract_data.license_type}")
- if contract_data.proxy == "1":
- print(f"Proxy Implementation: {contract_data.implementation}")
- print("\n" + "="*50 + " SOURCE CODE " + "="*50)
- print(contract_data.source_code)
-
- except ValueError as e:
- error_msg = str(e)
- if "NOTOK" in error_msg or "Missing/Invalid API Key" in error_msg:
- provided_api_key = api_key or os.getenv("ARBISCAN_API_KEY")
- if not provided_api_key:
- typer.echo("Error: Arbiscan API key is required to get contract source code.", err=True)
- typer.echo("Please provide an API key using the --api-key option or set the ARBISCAN_API_KEY environment variable.", err=True)
- typer.echo("You can get a free API key from https://arbiscan.io/apis", err=True)
- else:
- typer.echo("Error: Invalid Arbiscan API key provided.", err=True)
- typer.echo("Please check your API key and try again.", err=True)
- typer.echo("You can get a free API key from https://arbiscan.io/apis", err=True)
- else:
- typer.echo(f"Error: {error_msg}", err=True)
- raise typer.Exit(1)
- except Exception as e:
- typer.echo(f"Unexpected error: {e}", err=True)
- raise typer.Exit(1)
diff --git a/src/talos/cli/contracts.py b/src/talos/cli/contracts.py
deleted file mode 100644
index 120b6db3..00000000
--- a/src/talos/cli/contracts.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import json
-import os
-from typing import Optional
-
-import typer
-
-from talos.database.models import ContractDeployment
-from talos.database.session import get_session
-from talos.tools.contract_deployment import ContractDeploymentTool
-from talos.utils.contract_deployment import calculate_contract_signature
-
-contracts_app = typer.Typer()
-
-
-@contracts_app.command("deploy")
-def deploy_contract(
- bytecode_file: str = typer.Argument(..., help="Path to file containing contract bytecode"),
- salt: str = typer.Argument(..., help="Salt for CREATE2 deployment"),
- chain_id: int = typer.Option(42161, "--chain-id", "-c", help="Chain ID to deploy on"),
- constructor_args: Optional[str] = typer.Option(
- None, "--constructor-args", help="JSON array of constructor arguments"
- ),
- check: bool = typer.Option(False, "--check", help="Check for duplicate deployment and prevent if found"),
- gas_limit: Optional[int] = typer.Option(None, "--gas-limit", help="Gas limit for deployment"),
- output_format: str = typer.Option("formatted", "--format", help="Output format: 'formatted' or 'json'"),
-):
- """
- Deploy a smart contract with optional duplicate checking.
- """
- try:
- if not os.path.exists(bytecode_file):
- typer.echo(f"Error: Bytecode file not found: {bytecode_file}", err=True)
- raise typer.Exit(1)
-
- with open(bytecode_file, "r") as f:
- bytecode = f.read().strip()
-
- parsed_constructor_args = None
- if constructor_args:
- try:
- parsed_constructor_args = json.loads(constructor_args)
- except json.JSONDecodeError:
- typer.echo("Error: Invalid JSON in constructor arguments", err=True)
- raise typer.Exit(1)
-
- tool = ContractDeploymentTool()
- result = tool._run_unsupervised(
- bytecode=bytecode,
- salt=salt,
- chain_id=chain_id,
- constructor_args=parsed_constructor_args,
- check_duplicates=check,
- gas_limit=gas_limit,
- )
-
- if output_format == "json":
- print(json.dumps(result.model_dump(), indent=2))
- else:
- print("=== Contract Deployment Result ===")
- print(f"Contract Address: {result.contract_address}")
- print(f"Transaction Hash: {result.transaction_hash}")
- print(f"Chain ID: {result.chain_id}")
- print(f"Contract Signature: {result.contract_signature}")
- if result.gas_used:
- print(f"Gas Used: {result.gas_used}")
- if result.was_duplicate:
- print("⚠️ Duplicate deployment prevented (existing contract returned)")
- else:
- print("✅ New contract deployed successfully")
-
- except ValueError as e:
- typer.echo(f"Error: {e}", err=True)
- raise typer.Exit(1)
- except Exception as e:
- typer.echo(f"Unexpected error: {e}", err=True)
- raise typer.Exit(1)
-
-
-@contracts_app.command("list")
-def list_deployments(
- chain_id: Optional[int] = typer.Option(None, "--chain-id", "-c", help="Filter by chain ID"),
- limit: int = typer.Option(10, "--limit", "-l", help="Maximum number of deployments to show"),
-):
- """
- List contract deployments.
- """
- with get_session() as session:
- query = session.query(ContractDeployment)
-
- if chain_id:
- query = query.filter(ContractDeployment.chain_id == chain_id)
-
- deployments = query.order_by(ContractDeployment.deployed_at.desc()).limit(limit).all()
-
- if not deployments:
- print("No deployments found.")
- return
-
- print("=== Recent Contract Deployments ===")
- for deployment in deployments:
- print(f"Address: {deployment.contract_address}")
- print(f"Chain ID: {deployment.chain_id}")
- print(f"Signature: {deployment.contract_signature}")
- print(f"Deployed: {deployment.deployed_at}")
- print(f"TX Hash: {deployment.transaction_hash}")
- print("-" * 50)
-
-
-@contracts_app.command("check-duplicate")
-def check_duplicate(
- bytecode_file: str = typer.Argument(..., help="Path to file containing contract bytecode"),
- salt: str = typer.Argument(..., help="Salt for CREATE2 deployment"),
- chain_id: int = typer.Option(42161, "--chain-id", "-c", help="Chain ID to check"),
-):
- """
- Check if a contract would be a duplicate deployment.
- """
- try:
- if not os.path.exists(bytecode_file):
- typer.echo(f"Error: Bytecode file not found: {bytecode_file}", err=True)
- raise typer.Exit(1)
-
- with open(bytecode_file, "r") as f:
- bytecode = f.read().strip()
-
- signature = calculate_contract_signature(bytecode, salt)
-
- with get_session() as session:
- existing = (
- session.query(ContractDeployment)
- .filter(ContractDeployment.contract_signature == signature, ContractDeployment.chain_id == chain_id)
- .first()
- )
-
- if existing:
- print("⚠️ Duplicate deployment detected!")
- print(f"Existing contract: {existing.contract_address}")
- print(f"Deployed at: {existing.deployed_at}")
- print(f"Transaction: {existing.transaction_hash}")
- else:
- print("✅ No duplicate found. Safe to deploy.")
- print(f"Contract signature: {signature}")
-
- except Exception as e:
- typer.echo(f"Error: {e}", err=True)
- raise typer.Exit(1)
diff --git a/src/talos/cli/daemon.py b/src/talos/cli/daemon.py
deleted file mode 100644
index 6fbdd27e..00000000
--- a/src/talos/cli/daemon.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import logging
-import os
-import signal
-import sys
-from typing import Optional
-
-from langchain_openai import ChatOpenAI
-
-from talos.core.main_agent import MainAgent
-from talos.settings import OpenAISettings
-
-logger = logging.getLogger(__name__)
-
-
-class TalosDaemon:
- def __init__(self, prompts_dir: str = "src/talos/prompts", model_name: str = "gpt-5", temperature: float = 0.0):
- self.prompts_dir = prompts_dir
- self.model_name = model_name
- self.temperature = temperature
- self.main_agent: Optional[MainAgent] = None
- self.shutdown_event = asyncio.Event()
-
- def _validate_environment(self) -> None:
- OpenAISettings()
-
- if not os.path.exists(self.prompts_dir):
- raise FileNotFoundError(f"Prompts directory not found at {self.prompts_dir}")
-
- def _setup_signal_handlers(self) -> None:
- def signal_handler(signum: int, frame) -> None:
- logger.info(f"Received signal {signum}, initiating graceful shutdown...")
- asyncio.create_task(self._shutdown())
-
- signal.signal(signal.SIGTERM, signal_handler)
- signal.signal(signal.SIGINT, signal_handler)
-
- async def _shutdown(self) -> None:
- logger.info("Starting graceful shutdown...")
-
- if self.main_agent and self.main_agent.job_scheduler:
- logger.info("Stopping job scheduler...")
- self.main_agent.job_scheduler.stop()
- logger.info("Job scheduler stopped")
-
- self.shutdown_event.set()
- logger.info("Shutdown complete")
-
- def _initialize_agent(self) -> None:
- logger.info("Initializing MainAgent...")
-
- model = ChatOpenAI(model=self.model_name, temperature=self.temperature)
-
- self.main_agent = MainAgent(
- prompts_dir=self.prompts_dir,
- model=model,
- schema=None,
- )
-
- logger.info("MainAgent initialized successfully")
-
- if self.main_agent.startup_task_manager:
- logger.info("Executing startup tasks...")
- import asyncio
- asyncio.create_task(self.main_agent.startup_task_manager.execute_pending_tasks())
- logger.info("Startup tasks execution initiated")
-
- if self.main_agent.job_scheduler:
- logger.info(f"Job scheduler is running: {self.main_agent.job_scheduler.is_running()}")
- scheduled_jobs = self.main_agent.list_scheduled_jobs()
- logger.info(f"Number of scheduled jobs: {len(scheduled_jobs)}")
- for job in scheduled_jobs:
- logger.info(f" - {job}")
-
- async def run(self) -> None:
- try:
- self._validate_environment()
- self._setup_signal_handlers()
- self._initialize_agent()
-
- logger.info("Talos daemon started successfully. Waiting for scheduled jobs...")
- logger.info("Send SIGTERM or SIGINT to gracefully shutdown the daemon.")
-
- await self.shutdown_event.wait()
-
- except Exception as e:
- logger.error(f"Error in daemon: {e}")
- sys.exit(1)
-
-
-async def main() -> None:
- logging.basicConfig(
- level=logging.INFO,
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
-
- daemon = TalosDaemon()
- await daemon.run()
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/src/talos/cli/dataset.py b/src/talos/cli/dataset.py
deleted file mode 100644
index 2164d96f..00000000
--- a/src/talos/cli/dataset.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import typer
-from typing import Optional
-
-dataset_app = typer.Typer()
-
-
-@dataset_app.command("add")
-def add_dataset(
- name: str = typer.Argument(..., help="Name for the dataset"),
- source: str = typer.Argument(..., help="IPFS hash or URL of the document"),
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User ID for dataset isolation"),
- chunk_size: int = typer.Option(1000, "--chunk-size", help="Maximum size of each text chunk"),
- chunk_overlap: int = typer.Option(200, "--chunk-overlap", help="Number of characters to overlap between chunks"),
- use_database: bool = typer.Option(True, "--use-database", help="Use database backend for persistence"),
- verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output")
-):
- """Add a dataset from IPFS hash or URL with intelligent chunking."""
- try:
- from talos.data.dataset_manager import DatasetManager
- from langchain_openai import OpenAIEmbeddings
- from talos.settings import OpenAISettings
-
- OpenAISettings()
- embeddings_model = OpenAIEmbeddings()
-
- if use_database:
- from talos.database.session import init_database
- init_database()
-
- if not user_id:
- import uuid
- user_id = str(uuid.uuid4())
- if verbose:
- print(f"Generated temporary user ID: {user_id}")
-
- dataset_manager = DatasetManager(
- verbose=verbose,
- user_id=user_id,
- session_id="cli-session",
- use_database=True,
- embeddings=embeddings_model
- )
- else:
- dataset_manager = DatasetManager(verbose=verbose, embeddings=embeddings_model)
-
- try:
- existing = dataset_manager.get_dataset(name)
- print(f"❌ Dataset '{name}' already exists with {len(existing)} chunks")
- return
- except ValueError:
- pass
-
- if source.startswith(('http://', 'https://')):
- dataset_manager.add_document_from_url(name, source, chunk_size, chunk_overlap)
- else:
- dataset_manager.add_document_from_ipfs(name, source, chunk_size, chunk_overlap)
-
- print(f"✅ Successfully added dataset '{name}' from {source}")
-
- except Exception as e:
- print(f"❌ Error: {e}")
- raise typer.Exit(1)
-
-
-@dataset_app.command("remove")
-def remove_dataset(
- name: str = typer.Argument(..., help="Name of the dataset to remove"),
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User ID for dataset isolation"),
- use_database: bool = typer.Option(True, "--use-database", help="Use database backend"),
- verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output")
-):
- """Remove a dataset by name."""
- try:
- from talos.data.dataset_manager import DatasetManager
- from langchain_openai import OpenAIEmbeddings
- from talos.settings import OpenAISettings
-
- OpenAISettings()
- embeddings_model = OpenAIEmbeddings()
-
- if use_database:
- from talos.database.session import init_database
- init_database()
-
- if not user_id:
- import uuid
- user_id = str(uuid.uuid4())
- if verbose:
- print(f"Generated temporary user ID: {user_id}")
-
- dataset_manager = DatasetManager(
- verbose=verbose,
- user_id=user_id,
- session_id="cli-session",
- use_database=True,
- embeddings=embeddings_model
- )
- else:
- dataset_manager = DatasetManager(verbose=verbose, embeddings=embeddings_model)
-
- dataset_manager.remove_dataset(name)
- print(f"✅ Successfully removed dataset '{name}'")
-
- except Exception as e:
- print(f"❌ Error: {e}")
- raise typer.Exit(1)
-
-
-@dataset_app.command("list")
-def list_datasets(
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User ID for dataset isolation"),
- use_database: bool = typer.Option(True, "--use-database", help="Use database backend"),
- verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output")
-):
- """List all datasets."""
- try:
- from talos.data.dataset_manager import DatasetManager
- from langchain_openai import OpenAIEmbeddings
- from talos.settings import OpenAISettings
-
- OpenAISettings()
- embeddings_model = OpenAIEmbeddings()
-
- if use_database:
- from talos.database.session import init_database
- init_database()
-
- if not user_id:
- import uuid
- user_id = str(uuid.uuid4())
- if verbose:
- print(f"Generated temporary user ID: {user_id}")
-
- dataset_manager = DatasetManager(
- verbose=verbose,
- user_id=user_id,
- session_id="cli-session",
- use_database=True,
- embeddings=embeddings_model
- )
- else:
- dataset_manager = DatasetManager(verbose=verbose, embeddings=embeddings_model)
-
- datasets = dataset_manager.get_all_datasets()
-
- if not datasets:
- print("No datasets found.")
- return
-
- print(f"=== Found {len(datasets)} datasets ===")
- for name, data in datasets.items():
- print(f"📊 {name}: {len(data)} chunks")
-
- except Exception as e:
- print(f"❌ Error: {e}")
- raise typer.Exit(1)
-
-
-@dataset_app.command("search")
-def search_datasets(
- query: str = typer.Argument(..., help="Search query"),
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User ID for dataset isolation"),
- limit: int = typer.Option(5, "--limit", "-l", help="Maximum number of results"),
- use_database: bool = typer.Option(True, "--use-database", help="Use database backend"),
- verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output")
-):
- """Search datasets using semantic similarity."""
- try:
- from talos.data.dataset_manager import DatasetManager
- from langchain_openai import OpenAIEmbeddings
- from talos.settings import OpenAISettings
-
- OpenAISettings()
- embeddings_model = OpenAIEmbeddings()
-
- if use_database:
- from talos.database.session import init_database
- init_database()
-
- if not user_id:
- import uuid
- user_id = str(uuid.uuid4())
- if verbose:
- print(f"Generated temporary user ID: {user_id}")
-
- dataset_manager = DatasetManager(
- verbose=verbose,
- user_id=user_id,
- session_id="cli-session",
- use_database=True,
- embeddings=embeddings_model
- )
- else:
- dataset_manager = DatasetManager(verbose=verbose, embeddings=embeddings_model)
-
- results = dataset_manager.search(query, k=limit)
-
- if not results:
- print(f"No results found for query: '{query}'")
- return
-
- print(f"=== Search Results for '{query}' ({len(results)} found) ===")
- for i, result in enumerate(results, 1):
- print(f"{i}. {result[:200]}...")
- print()
-
- except Exception as e:
- print(f"❌ Error: {e}")
- raise typer.Exit(1)
diff --git a/src/talos/cli/github.py b/src/talos/cli/github.py
deleted file mode 100644
index 7805ebb3..00000000
--- a/src/talos/cli/github.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from typing import Optional
-import typer
-from langchain_openai import ChatOpenAI
-
-from talos.cli.utils import get_repo_info
-
-github_app = typer.Typer()
-
-
-@github_app.command("get-prs")
-def get_prs(
- repo: Optional[str] = typer.Option(None, "--repo", "-r", help="Repository in format 'owner/repo'"),
- state: str = typer.Option("open", "--state", help="PR state: open, closed, or all")
-):
- """List all pull requests for a repository."""
- try:
- from talos.tools.github.tools import GithubTools
-
- owner, repo_name = get_repo_info(repo)
- github_tools = GithubTools()
- prs = github_tools.get_all_pull_requests(owner, repo_name, state)
-
- if not prs:
- print(f"No {state} pull requests found in {owner}/{repo_name}")
- return
-
- print(f"=== {state.title()} Pull Requests for {owner}/{repo_name} ===")
- for pr in prs:
- print(f"#{pr['number']}: {pr['title']}")
- print(f" URL: {pr['url']}")
- print()
- except Exception as e:
- print(f"Error: {e}")
- raise typer.Exit(1)
-
-
-@github_app.command("review-pr")
-def review_pr(
- pr_number: int = typer.Argument(..., help="Pull request number to review"),
- repo: Optional[str] = typer.Option(None, "--repo", "-r", help="Repository in format 'owner/repo'"),
- post_review: bool = typer.Option(False, "--post", help="Post the review as a comment on the PR"),
- auto_approve: bool = typer.Option(False, "--auto-approve", help="Automatically approve if criteria are met")
-):
- """Review a pull request using AI analysis."""
- try:
- from talos.skills.pr_review import PRReviewSkill
- from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
- from talos.tools.github.tools import GithubTools
-
- owner, repo_name = get_repo_info(repo)
-
- model = ChatOpenAI(model="gpt-4", temperature=0.0)
- prompt_manager = FilePromptManager("src/talos/prompts")
- github_tools = GithubTools()
-
- skill = PRReviewSkill(
- llm=model,
- prompt_manager=prompt_manager,
- github_tools=github_tools
- )
-
- response = skill.run(
- user=owner,
- repo=repo_name,
- pr_number=pr_number,
- auto_comment=post_review,
- auto_approve=auto_approve
- )
-
- print(f"=== PR Review for {owner}/{repo_name}#{pr_number} ===")
- print(response.answers[0])
- if response.security_score:
- print(f"\nSecurity Score: {response.security_score}/100")
- if response.quality_score:
- print(f"Quality Score: {response.quality_score}/100")
- if response.recommendation:
- print(f"Recommendation: {response.recommendation}")
- if response.reasoning:
- print(f"Reasoning: {response.reasoning}")
-
- except Exception as e:
- print(f"Error: {e}")
- raise typer.Exit(1)
-
-
-@github_app.command("approve-pr")
-def approve_pr(
- pr_number: int = typer.Argument(..., help="Pull request number to approve"),
- repo: Optional[str] = typer.Option(None, "--repo", "-r", help="Repository in format 'owner/repo'")
-):
- """Force approve a pull request."""
- try:
- from talos.tools.github.tools import GithubTools
-
- owner, repo_name = get_repo_info(repo)
- github_tools = GithubTools()
-
- github_tools.approve_pr(owner, repo_name, pr_number)
- print(f"✅ Approved PR #{pr_number} in {owner}/{repo_name}")
-
- except Exception as e:
- print(f"Error: {e}")
- raise typer.Exit(1)
-
-
-@github_app.command("merge-pr")
-def merge_pr(
- pr_number: int = typer.Argument(..., help="Pull request number to merge"),
- repo: Optional[str] = typer.Option(None, "--repo", "-r", help="Repository in format 'owner/repo'")
-):
- """Merge a pull request."""
- try:
- from talos.tools.github.tools import GithubTools
-
- owner, repo_name = get_repo_info(repo)
- github_tools = GithubTools()
-
- github_tools.merge_pr(owner, repo_name, pr_number)
- print(f"🎉 Merged PR #{pr_number} in {owner}/{repo_name}")
-
- except Exception as e:
- print(f"Error: {e}")
- raise typer.Exit(1)
diff --git a/src/talos/cli/main.py b/src/talos/cli/main.py
deleted file mode 100644
index fc023ce4..00000000
--- a/src/talos/cli/main.py
+++ /dev/null
@@ -1,211 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import base64
-import os
-from typing import Optional
-
-import typer
-from langchain_core.messages import AIMessage
-from langchain_openai import ChatOpenAI
-from nacl.public import PublicKey, SealedBox
-
-from talos.cli.arbiscan import arbiscan_app
-from talos.cli.contracts import contracts_app
-from talos.cli.daemon import TalosDaemon
-from talos.cli.dataset import dataset_app
-from talos.cli.github import github_app
-from talos.cli.memory import memory_app
-from talos.cli.migrations import app as migrations_app
-from talos.cli.proposals import proposals_app
-from talos.cli.twitter import twitter_app
-from talos.core.main_agent import MainAgent
-from talos.database.utils import cleanup_temporary_users, get_user_stats
-from talos.services.key_management import KeyManagement
-from talos.settings import OpenAISettings
-
-app = typer.Typer()
-app.add_typer(twitter_app, name="twitter")
-app.add_typer(proposals_app, name="proposals")
-app.add_typer(github_app, name="github")
-app.add_typer(memory_app, name="memory")
-app.add_typer(dataset_app, name="dataset")
-app.add_typer(arbiscan_app, name="arbiscan")
-app.add_typer(contracts_app, name="contracts")
-app.add_typer(migrations_app, name="migrations")
-
-
-@app.callback()
-def callback(
- ctx: typer.Context,
- verbose: int = typer.Option(
- 0, "--verbose", "-v", count=True, help="Enable verbose output. Use -v for basic, -vv for detailed."
- ),
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User identifier for conversation tracking."),
- use_database: bool = typer.Option(
- True, "--use-database", help="Use database for conversation storage instead of files."
- ),
-):
- """
- The main entry point for the Talos agent.
- """
- pass
-
-
-@app.command(name="main")
-def main_command(
- query: Optional[str] = typer.Argument(None, help="The query to send to the agent."),
- prompts_dir: str = "src/talos/prompts",
- model_name: str = "gpt-5",
- temperature: float = 0.0,
- verbose: int = typer.Option(
- 0, "--verbose", "-v", count=True, help="Enable verbose output. Use -v for basic, -vv for detailed."
- ),
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User identifier for conversation tracking."),
- use_database: bool = typer.Option(
- True, "--use-database", help="Use database for conversation storage instead of files."
- ),
-) -> None:
- """
- The main entry point for the Talos agent.
- """
- if not os.path.exists(prompts_dir):
- raise FileNotFoundError(f"Prompts directory not found at {prompts_dir}")
-
- OpenAISettings()
-
- # Create the main agent
- model = ChatOpenAI(model=model_name, temperature=temperature)
- main_agent = MainAgent(
- prompts_dir=prompts_dir,
- model=model,
- schema=None,
- user_id=user_id,
- use_database_memory=use_database,
- verbose=verbose,
- )
-
- if not user_id and use_database:
- print(f"Generated temporary user ID: {main_agent.user_id}")
-
- if query:
- # Run the agent
- result = main_agent.run(query)
- if isinstance(result, AIMessage):
- print(result.content)
- else:
- print(result)
- return
-
- # Interactive mode
- print("Entering interactive mode. Type 'exit' to quit.")
- while True:
- try:
- user_input = input(">> ")
- if user_input.lower() == "exit":
- break
- result = main_agent.run(user_input)
- if isinstance(result, AIMessage):
- print(result.content)
- else:
- print(result)
- except KeyboardInterrupt:
- break
-
-
-@app.command()
-def generate_keys(key_dir: str = ".keys"):
- """
- Generates a new RSA key pair.
- """
- km = KeyManagement(key_dir=key_dir)
- km.generate_keys()
- print(f"Keys generated in {key_dir}")
-
-
-@app.command()
-def get_public_key(key_dir: str = ".keys"):
- """
- Gets the public key.
- """
- km = KeyManagement(key_dir=key_dir)
- print(km.get_public_key())
-
-
-@app.command()
-def encrypt(data: str, public_key_file: str):
- """
- Encrypts a message.
- """
- with open(public_key_file, "rb") as f:
- public_key = f.read()
-
- sealed_box = SealedBox(PublicKey(public_key))
- encrypted = sealed_box.encrypt(data.encode())
- print(base64.b64encode(encrypted).decode())
-
-
-@app.command()
-def decrypt(encrypted_data: str, key_dir: str = ".keys"):
- """
- Decrypts a message.
- """
- km = KeyManagement(key_dir=key_dir)
- decoded_data = base64.b64decode(encrypted_data)
- print(km.decrypt(decoded_data))
-
-
-@app.command()
-def daemon(
- prompts_dir: str = "src/talos/prompts",
- model_name: str = "gpt-5",
- temperature: float = 0.0,
-) -> None:
- """
- Run the Talos agent in daemon mode for continuous operation with scheduled jobs.
- """
- daemon = TalosDaemon(prompts_dir=prompts_dir, model_name=model_name, temperature=temperature)
- asyncio.run(daemon.run())
-
-
-@app.command()
-def cleanup_users(
- older_than_hours: int = typer.Option(
- 24, "--older-than", help="Remove temporary users inactive for this many hours."
- ),
- dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be deleted without actually deleting."),
-) -> None:
- """
- Clean up temporary users and their conversation data.
- """
- if dry_run:
- stats = get_user_stats()
- print("Current database stats:")
- print(f" Total users: {stats['total_users']}")
- print(f" Permanent users: {stats['permanent_users']}")
- print(f" Temporary users: {stats['temporary_users']}")
- print(f"\nWould clean up temporary users inactive for {older_than_hours} hours.")
- print("Use --no-dry-run to actually perform the cleanup.")
- else:
- count = cleanup_temporary_users(older_than_hours)
- print(f"Cleaned up {count} temporary users and their conversation data.")
-
-
-@app.command()
-def db_stats() -> None:
- """
- Show database statistics.
- """
- stats = get_user_stats()
- print("Database Statistics:")
- print(f" Total users: {stats['total_users']}")
- print(f" Permanent users: {stats['permanent_users']}")
- print(f" Temporary users: {stats['temporary_users']}")
-
- if stats["total_users"] > 0:
- temp_percentage = (stats["temporary_users"] / stats["total_users"]) * 100
- print(f" Temporary user percentage: {temp_percentage:.1f}%")
-
-
-if __name__ == "__main__":
- app()
diff --git a/src/talos/cli/memory.py b/src/talos/cli/memory.py
deleted file mode 100644
index 049695c0..00000000
--- a/src/talos/cli/memory.py
+++ /dev/null
@@ -1,192 +0,0 @@
-from datetime import datetime
-from typing import Optional
-import typer
-
-memory_app = typer.Typer()
-
-
-@memory_app.command("list")
-def list_memories(
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User ID to filter memories by"),
- filter_user: Optional[str] = typer.Option(None, "--filter-user", help="Filter memories by a different user"),
- use_database: bool = typer.Option(True, "--use-database", help="Use database backend instead of files"),
- verbose: int = typer.Option(0, "--verbose", "-v", count=True, help="Enable verbose output. Use -v for basic, -vv for detailed.")
-):
- """List all memories with optional user filtering."""
- try:
- from talos.core.memory import Memory
- from langchain_openai import OpenAIEmbeddings
- from talos.settings import OpenAISettings
-
- OpenAISettings()
- embeddings_model = OpenAIEmbeddings()
-
- if use_database:
- from talos.database.session import init_database
- init_database()
-
- if not user_id:
- import uuid
- user_id = str(uuid.uuid4())
- if verbose >= 1:
- print(f"Generated temporary user ID: {user_id}")
-
- memory = Memory(
- embeddings_model=embeddings_model,
- user_id=user_id,
- session_id="cli-session",
- use_database=True,
- verbose=verbose
- )
- else:
- from pathlib import Path
- memory_dir = Path("memory")
- memory_dir.mkdir(exist_ok=True)
-
- memory = Memory(
- file_path=memory_dir / "memories.json",
- embeddings_model=embeddings_model,
- history_file_path=memory_dir / "history.json",
- use_database=False,
- verbose=verbose
- )
-
- memories = memory.list_all(filter_user_id=filter_user)
-
- if not memories:
- print("No memories found.")
- return
-
- print(f"=== Found {len(memories)} memories ===")
- for i, mem in enumerate(memories, 1):
- timestamp_str = datetime.fromtimestamp(mem.timestamp).strftime("%Y-%m-%d %H:%M:%S")
- print(f"{i}. [{timestamp_str}] {mem.description}")
- if mem.metadata:
- print(f" Metadata: {mem.metadata}")
- print()
-
- except Exception as e:
- print(f"Error: {e}")
- raise typer.Exit(1)
-
-
-@memory_app.command("search")
-def search_memories(
- query: str = typer.Argument(..., help="Search query for memories"),
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User ID to search memories for"),
- filter_user: Optional[str] = typer.Option(None, "--filter-user", help="Filter memories by a different user"),
- limit: int = typer.Option(5, "--limit", "-l", help="Maximum number of results to return"),
- use_database: bool = typer.Option(True, "--use-database", help="Use database backend instead of files"),
- verbose: int = typer.Option(0, "--verbose", "-v", count=True, help="Enable verbose output. Use -v for basic, -vv for detailed.")
-):
- """Search memories using semantic similarity with optional user filtering."""
- try:
- from talos.core.memory import Memory
- from langchain_openai import OpenAIEmbeddings
- from talos.settings import OpenAISettings
-
- OpenAISettings()
- embeddings_model = OpenAIEmbeddings()
-
- if use_database:
- from talos.database.session import init_database
- init_database()
-
- if not user_id:
- import uuid
- user_id = str(uuid.uuid4())
- if verbose >= 1:
- print(f"Generated temporary user ID: {user_id}")
-
- memory = Memory(
- embeddings_model=embeddings_model,
- user_id=user_id,
- session_id="cli-session",
- use_database=True,
- verbose=verbose
- )
- else:
- from pathlib import Path
- memory_dir = Path("memory")
- memory_dir.mkdir(exist_ok=True)
-
- memory = Memory(
- file_path=memory_dir / "memories.json",
- embeddings_model=embeddings_model,
- history_file_path=memory_dir / "history.json",
- use_database=False,
- verbose=verbose
- )
-
- if filter_user and use_database:
- memory = Memory(
- embeddings_model=embeddings_model,
- user_id=filter_user,
- session_id="cli-session",
- use_database=True,
- verbose=verbose
- )
- elif filter_user and not use_database:
- print("Warning: User filtering not supported with file-based backend")
-
- results = memory.search(query, k=limit)
-
- if not results:
- print(f"No memories found for query: '{query}'")
- return
-
- print(f"=== Search Results for '{query}' ({len(results)} found) ===")
- for i, mem in enumerate(results, 1):
- timestamp_str = datetime.fromtimestamp(mem.timestamp).strftime("%Y-%m-%d %H:%M:%S")
- print(f"{i}. [{timestamp_str}] {mem.description}")
- if mem.metadata:
- print(f" Metadata: {mem.metadata}")
- print()
-
- except Exception as e:
- print(f"Error: {e}")
- raise typer.Exit(1)
-
-
-@memory_app.command("flush")
-def flush_memories(
- user_id: Optional[str] = typer.Option(None, "--user-id", "-u", help="User ID for database backend. If not provided with database backend, flushes ALL memories."),
- use_database: bool = typer.Option(True, "--use-database", help="Use database backend instead of files"),
- verbose: int = typer.Option(0, "--verbose", "-v", count=True, help="Enable verbose output. Use -v for basic, -vv for detailed.")
-):
- """Flush unsaved memories to disk. If no user_id provided with database backend, flushes ALL memories after confirmation."""
- try:
- from talos.core.memory import Memory
- from langchain_openai import OpenAIEmbeddings
- from talos.settings import OpenAISettings
-
- OpenAISettings()
- embeddings_model = OpenAIEmbeddings()
-
- if use_database:
- print("Database-based memory flushing is no longer supported.")
- print("Use file-based memory storage instead with --no-use-database flag.")
- return
- else:
- from pathlib import Path
- memory_dir = Path("memory")
- memory_dir.mkdir(exist_ok=True)
-
- memory = Memory(
- file_path=memory_dir / "memories.json",
- embeddings_model=embeddings_model,
- history_file_path=memory_dir / "history.json",
- use_database=False,
- verbose=verbose
- )
-
- if hasattr(memory, '_unsaved_count') and memory._unsaved_count > 0:
- unsaved_count = memory._unsaved_count
- memory.flush()
- print(f"Successfully flushed {unsaved_count} unsaved memories to disk.")
- else:
- print("No unsaved memories to flush.")
-
- except Exception as e:
- print(f"Error: {e}")
- raise typer.Exit(1)
diff --git a/src/talos/cli/migrations.py b/src/talos/cli/migrations.py
deleted file mode 100644
index 0d6fd554..00000000
--- a/src/talos/cli/migrations.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""CLI commands for database migrations."""
-
-import typer
-from sqlalchemy import create_engine
-
-from talos.database import (
- run_migrations,
- check_migration_status,
- create_migration,
- get_current_revision,
- get_head_revision,
-)
-from talos.database.session import get_database_url
-
-app = typer.Typer(help="Database migration commands")
-
-
-@app.command()
-def status() -> None:
- """Check the current migration status."""
- database_url = get_database_url()
- engine = create_engine(database_url)
-
- status_info = check_migration_status(engine)
-
- typer.echo("Database Migration Status:")
- typer.echo(f" Current revision: {status_info['current_revision']}")
- typer.echo(f" Head revision: {status_info['head_revision']}")
- typer.echo(f" Up to date: {status_info['is_up_to_date']}")
- typer.echo(f" Needs migration: {status_info['needs_migration']}")
-
-
-@app.command()
-def upgrade() -> None:
- """Run all pending migrations."""
- database_url = get_database_url()
- engine = create_engine(database_url)
-
- typer.echo("Running database migrations...")
- run_migrations(engine)
- typer.echo("Migrations completed successfully!")
-
-
-@app.command()
-def create(message: str = typer.Argument(..., help="Migration message")) -> None:
- """Create a new migration file."""
- typer.echo(f"Creating migration: {message}")
- revision = create_migration(message)
- typer.echo(f"Created migration with revision: {revision}")
-
-
-@app.command()
-def current() -> None:
- """Show the current database revision."""
- database_url = get_database_url()
- engine = create_engine(database_url)
-
- current_rev = get_current_revision(engine)
- typer.echo(f"Current database revision: {current_rev}")
-
-
-@app.command()
-def head() -> None:
- """Show the head revision."""
- head_rev = get_head_revision()
- typer.echo(f"Head revision: {head_rev}")
-
-
-if __name__ == "__main__":
- app()
diff --git a/src/talos/cli/proposals.py b/src/talos/cli/proposals.py
deleted file mode 100644
index 938687b7..00000000
--- a/src/talos/cli/proposals.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import os
-import typer
-from langchain_openai import ChatOpenAI
-
-from talos.skills.proposals import ProposalsSkill
-
-proposals_app = typer.Typer()
-
-
-@proposals_app.command("eval")
-def eval_proposal(
- filepath: str = typer.Option(..., "--file", "-f", help="Path to the proposal file."),
- model_name: str = "gpt-5",
- temperature: float = 0.0,
-):
- """
- Evaluates a proposal from a file.
- """
- if not os.path.exists(filepath):
- raise FileNotFoundError(f"File not found at {filepath}")
- model = ChatOpenAI(model=model_name, temperature=temperature)
- skill = ProposalsSkill(llm=model)
- response = skill.run(filepath=filepath)
- print(response.answers[0])
diff --git a/src/talos/cli/server.py b/src/talos/cli/server.py
deleted file mode 100644
index ef47682e..00000000
--- a/src/talos/cli/server.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import annotations
-
-from talos.server.main import app
-
-if __name__ == "__main__":
- import uvicorn
-
- uvicorn.run(app, host="0.0.0.0", port=8080)
diff --git a/src/talos/cli/twitter.py b/src/talos/cli/twitter.py
deleted file mode 100644
index 4aa17012..00000000
--- a/src/talos/cli/twitter.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from typing import Optional
-import typer
-
-from talos.skills.twitter_persona import TwitterPersonaSkill
-from talos.skills.twitter_sentiment import TwitterSentimentSkill
-from talos.skills.twitter_voice import TwitterVoiceSkill
-
-twitter_app = typer.Typer()
-
-
-@twitter_app.command()
-def get_user_prompt(username: str):
- """
- Gets the general voice of a user as a structured persona analysis.
- """
- skill = TwitterPersonaSkill()
- response = skill.run(username=username)
-
- print(f"=== Twitter Persona Analysis for @{username} ===\n")
- print(f"Report:\n{response.report}\n")
- print(f"Topics: {', '.join(response.topics)}\n")
- print(f"Style: {', '.join(response.style)}")
-
-
-@twitter_app.command()
-def get_query_sentiment(query: str, start_time: Optional[str] = None):
- """
- Gets the general sentiment/report on a specific query.
-
- Args:
- query: Search query for tweets
- start_time: Optional datetime filter (ISO 8601 format, e.g., "2023-01-01T00:00:00Z")
- """
- skill = TwitterSentimentSkill()
- response = skill.run(query=query, start_time=start_time)
- if response.score is not None:
- print(f"Sentiment Score: {response.score}/100")
- print("=" * 50)
- print(response.answers[0])
-
-
-@twitter_app.command()
-def integrate_voice(username: str = "talos_is"):
- """
- Integrate Twitter voice analysis into agent communication.
-
- Args:
- username: Twitter username to analyze (defaults to talos_is)
- """
- skill = TwitterVoiceSkill()
- result = skill.run(username=username)
-
- print(f"=== Voice Integration for @{username} ===\n")
- print(f"Voice Source: {result['voice_source']}")
- print(f"Voice Prompt Generated:\n{result['voice_prompt']}")
- print("\nPersona Analysis:")
- print(f"Topics: {', '.join(result['persona_analysis'].topics)}")
- print(f"Style: {', '.join(result['persona_analysis'].style)}")
diff --git a/src/talos/cli/utils.py b/src/talos/cli/utils.py
deleted file mode 100644
index 65ab66ca..00000000
--- a/src/talos/cli/utils.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from typing import Optional
-import os
-import typer
-
-
-def get_repo_info(repo: Optional[str] = None) -> tuple[str, str]:
- """Get repository owner and name from CLI arg or environment variable."""
- repo_str = repo or os.getenv("GITHUB_REPO")
- if not repo_str:
- raise typer.BadParameter("Repository must be provided via --repo argument or GITHUB_REPO environment variable")
-
- if "/" not in repo_str:
- raise typer.BadParameter("Repository must be in format 'owner/repo'")
-
- owner, repo_name = repo_str.split("/", 1)
- return owner.strip(), repo_name.strip()
diff --git a/src/talos/code_agent.py b/src/talos/code_agent.py
deleted file mode 100644
index 6984d03b..00000000
--- a/src/talos/code_agent.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from abc import ABC, abstractmethod
-
-
-class CodeAgent(ABC):
- """
- An abstract base class for a code agent.
- """
-
- @abstractmethod
- def work_on_task(self, repository: str, task: str) -> None:
- """
- Works on a task in a repository.
- """
- pass
-
- @abstractmethod
- def ask_question(self, question: str) -> str:
- """
- Asks the user a question.
- """
- pass
-
- @abstractmethod
- def interrupt(self, message: str) -> None:
- """
- Interrupts the agent with a message.
- """
- pass
-
- @abstractmethod
- def halt(self) -> None:
- """
- Halts the agent.
- """
- pass
-
- @abstractmethod
- def resume(self) -> None:
- """
- Resumes the agent.
- """
- pass
-
- @abstractmethod
- def get_current_task(self) -> str | None:
- """
- Returns the current task.
- """
- pass
diff --git a/src/talos/constants.py b/src/talos/constants.py
deleted file mode 100644
index ef34a7b2..00000000
--- a/src/talos/constants.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from eth_typing import HexAddress, HexStr
-
-
-class WETH:
- ARBITRUM = HexAddress(HexStr("0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"))
-
-
-class OHM:
- ARBITRUM = HexAddress(HexStr("0xf0cb2dc0db5e6c66B9a70Ac27B06b878da017028"))
-
-
-class CamelotYakSwapConstants:
- ADAPTER = HexAddress(HexStr("0x610934FEBC44BE225ADEcD888eAF7DFf3B0bc050"))
- ROUTER = HexAddress(HexStr("0x99D4e80DB0C023EFF8D25d8155E0dCFb5aDDeC5E"))
-
-
-class CamelotPoolAddresses:
- OHM_ETH = HexAddress(HexStr("0x8aCd42e4B5A5750B44A28C5fb50906eBfF145359"))
diff --git a/src/talos/contracts/camelot_swap.py b/src/talos/contracts/camelot_swap.py
deleted file mode 100644
index 882d7147..00000000
--- a/src/talos/contracts/camelot_swap.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from typing import Annotated
-
-from eth_rpc import ContractFunc, PrivateKeyWallet, ProtocolBase, TransactionReceipt
-from eth_rpc.networks import Arbitrum
-from eth_rpc.types import METHOD, Name, Struct, primitives
-from eth_rpc.utils import EventReceiptUtility
-from eth_typeshed.erc20 import TransferEvent, TransferEventType
-from eth_typing import HexAddress, HexStr
-from pydantic import BaseModel
-
-from talos.constants import OHM, WETH, CamelotYakSwapConstants
-
-
-class Trade(Struct):
- amount_in: primitives.uint256
- amount_out: primitives.uint256
- path: list[HexAddress]
- adapters: list[HexAddress]
- recipients: list[HexAddress]
-
-
-class Request(BaseModel):
- trade: Trade
- fee: primitives.uint256
- to: HexAddress
-
-
-class QueryAdapterArgs(BaseModel):
- amount_in: primitives.uint256
- token_in: HexAddress
- token_out: HexAddress
- index: primitives.uint8
-
-
-class QueryAdapterResponse(BaseModel):
- amount_out: primitives.uint256
- pool_address: HexAddress
-
-
-class CamelotYakSwap(ProtocolBase):
- swap_no_split_from_eth: Annotated[ContractFunc[Request, None], Name("swapNoSplitFromETH")] = METHOD
- ADAPTERS: ContractFunc[primitives.uint256, primitives.address] = METHOD
- query_adapter: Annotated[ContractFunc[QueryAdapterArgs, QueryAdapterResponse], Name("queryAdapter")] = METHOD
-
- @classmethod
- def OHM_PATH(cls) -> list[HexAddress]:
- return [WETH.ARBITRUM, OHM.ARBITRUM]
-
- @classmethod
- async def swap_for_ohm(
- self,
- amount_in: primitives.uint256,
- wallet: PrivateKeyWallet,
- ) -> tuple[HexStr, TransferEventType]:
- router = CamelotYakSwap[Arbitrum](address=CamelotYakSwapConstants.ROUTER)
-
- query_response = await router.query_adapter(
- QueryAdapterArgs(
- amount_in=amount_in,
- token_in=WETH.ARBITRUM,
- token_out=OHM.ARBITRUM,
- index=primitives.uint8(0),
- )
- ).get()
-
- path = self.OHM_PATH()
- adapters = [CamelotYakSwapConstants.ADAPTER]
- recipients = [query_response.pool_address]
- request = Request(
- trade=Trade(
- amount_in=amount_in,
- amount_out=primitives.uint256(int(query_response.amount_out * 99 / 100)),
- path=path,
- adapters=adapters,
- recipients=recipients,
- ),
- fee=primitives.uint256(0),
- to=wallet.address,
- )
-
- tx_hash = await router.swap_no_split_from_eth(request).execute(wallet, value=amount_in)
-
- await TransactionReceipt[Arbitrum].wait_until_finalized(tx_hash, timeout=10)
-
- receipt = await TransactionReceipt[Arbitrum].get_by_hash(tx_hash)
-
- assert receipt is not None
- transfers = await EventReceiptUtility.get_events_from_receipt([TransferEvent], receipt)
-
- received_events: list[TransferEventType] = []
- sent_events = []
- for transfer in transfers:
- if (
- transfer.event.recipient.lower() == wallet.address.lower()
- and transfer.log.address.lower() == OHM.ARBITRUM.lower()
- ):
- received_events.append(transfer.event)
- if transfer.event.sender.lower() == wallet.address.lower():
- sent_events.append(transfer.event)
-
- received_event = received_events[0]
- return (tx_hash, received_event)
diff --git a/src/talos/contracts/ccip/__init__.py b/src/talos/contracts/ccip/__init__.py
deleted file mode 100644
index 9b84f191..00000000
--- a/src/talos/contracts/ccip/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from .router import (
- CCIPConstants,
- CCIPFeeArgs,
- CCIPRouter,
- CCIPRouterAddress,
- CCIPSendArgs,
- EVM2AnyMessage,
- EVMTokenAmount,
-)
-from .schema import CCIPMessageResponse, CCIPMessageStatusResponse
-
-__all__ = [
- "CCIPRouter",
- "CCIPRouterAddress",
- "CCIPConstants",
- "EVM2AnyMessage",
- "EVMTokenAmount",
- "CCIPSendArgs",
- "CCIPFeeArgs",
- "CCIPMessageResponse",
- "CCIPMessageStatusResponse",
-]
diff --git a/src/talos/contracts/ccip/router.py b/src/talos/contracts/ccip/router.py
deleted file mode 100644
index 0ce7b052..00000000
--- a/src/talos/contracts/ccip/router.py
+++ /dev/null
@@ -1,167 +0,0 @@
-from enum import IntEnum, StrEnum
-from typing import Annotated
-
-import httpx
-from eth_abi import encode
-from eth_rpc import ContractFunc, PrivateKeyWallet, ProtocolBase, TransactionReceipt
-from eth_rpc.networks import Arbitrum, Ethereum
-from eth_rpc.types import METHOD, Name, Network, Struct, primitives
-from eth_typeshed.erc20 import ApproveRequest, OwnerSpenderRequest
-from eth_typeshed.weth import WETH
-from eth_typing import HexAddress, HexStr
-from pydantic import BaseModel
-
-from .schema import CCIPMessageResponse, CCIPMessageStatusResponse
-
-ZERO_ADDRESS = HexAddress(HexStr("0x0000000000000000000000000000000000000000"))
-EVM_EXTRA_ARGS_V1_TAG = "0x97a657c9"
-
-
-class WETH_ADDRESS(StrEnum):
- ARBITRUM = HexAddress(HexStr("0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"))
- ETHEREUM = HexAddress(HexStr("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"))
-
- @classmethod
- def from_network(cls, network: type[Network]) -> HexAddress:
- if network == Arbitrum:
- return HexAddress(HexStr(cls.ARBITRUM))
- elif network == Ethereum:
- return HexAddress(HexStr(cls.ETHEREUM))
- raise ValueError(f"Invalid network: {network}")
-
-
-class CCIPRouterAddress(StrEnum):
- ARBITRUM = HexAddress(HexStr("0x141fa059441E0ca23ce184B6A78bafD2A517DdE8"))
- ETHEREUM = HexAddress(HexStr("0x80226fc0Ee2b096224EeAc085Bb9a8cba1146f7D"))
-
- @classmethod
- def from_network(cls, network: type[Network]) -> HexAddress:
- if network == Arbitrum:
- return HexAddress(HexStr(cls.ARBITRUM))
- elif network == Ethereum:
- return HexAddress(HexStr(cls.ETHEREUM))
- raise ValueError(f"Invalid network: {network}")
-
-
-class CCIPConstants(IntEnum):
- ARBITRUM = 4949039107694359620
- MAINNET = 5009297550715157269
-
- @classmethod
- def from_network(cls, network: type[Network]) -> primitives.uint64:
- if network == Arbitrum:
- return primitives.uint64(cls.ARBITRUM)
- elif network == Ethereum:
- return primitives.uint64(cls.MAINNET)
- raise ValueError(f"Invalid network: {network}")
-
-
-class EVMTokenAmount(Struct):
- token: HexAddress
- amount: primitives.uint256
-
-
-class EVM2AnyMessage(Struct):
- receiver: bytes
- data: bytes
- token_amounts: list[EVMTokenAmount]
- fee_token: HexAddress
- extra_args: bytes
-
-
-class CCIPSendArgs(BaseModel):
- dest_chain_selector: primitives.uint64
- message: EVM2AnyMessage
-
-
-class CCIPFeeArgs(BaseModel):
- dest_chain_selector: primitives.uint64
- message: EVM2AnyMessage
-
-
-class CCIPRouter(ProtocolBase):
- get_fee: Annotated[ContractFunc[CCIPFeeArgs, primitives.uint256], Name("getFee")] = METHOD
- ccip_send: Annotated[ContractFunc[CCIPSendArgs, primitives.bytes32], Name("ccipSend")] = METHOD
-
- @classmethod
- def _encode_address(self, address: HexAddress) -> bytes:
- address_hex: bytes = encode(["address"], [address])
- return address_hex
-
- @classmethod
- def _encode_gas_limit(self, gas_limit: primitives.uint256) -> bytes:
- prefix = EVM_EXTRA_ARGS_V1_TAG[2:]
- gas_limit_bytes = gas_limit.to_bytes(32, "big").hex()
- return bytes.fromhex(f"{prefix}{gas_limit_bytes}")
-
- async def bridge_native(
- self,
- amount: primitives.uint256,
- wallet: PrivateKeyWallet,
- from_network: type[Network],
- to_network: type[Network],
- gas_limit: primitives.uint256 = primitives.uint256(200_000),
- recipient: HexAddress | None = None,
- verbose: bool = False,
- wrap: bool = True,
- ) -> HexStr:
- weth_address = WETH_ADDRESS.from_network(from_network)
- weth_contract = WETH[from_network](address=weth_address)
- if wrap:
- tx_hash = await weth_contract.deposit().execute(wallet, value=amount)
-
- await TransactionReceipt[from_network].wait_until_finalized(tx_hash, timeout=10)
-
- if verbose:
- print(f"WETH deposit tx hash: {tx_hash}")
-
- if not recipient:
- recipient = wallet.address
-
- allowance = await weth_contract.allowance(OwnerSpenderRequest(owner=wallet.address, spender=self.address)).get()
- if allowance < amount:
- tx_hash = await weth_contract.approve(ApproveRequest(spender=self.address, amount=amount)).execute(wallet)
-
- await TransactionReceipt[from_network].wait_until_finalized(tx_hash, timeout=10)
-
- if verbose:
- print(f"WETH approve tx hash: {tx_hash}")
-
- dest_chain_selector = CCIPConstants.from_network(to_network)
- message = EVM2AnyMessage(
- receiver=self._encode_address(recipient),
- data=b"",
- token_amounts=[EVMTokenAmount(token=weth_address, amount=amount)],
- extra_args=self._encode_gas_limit(gas_limit),
- fee_token=ZERO_ADDRESS,
- )
- fee_amount = await self.get_fee(CCIPFeeArgs(dest_chain_selector=dest_chain_selector, message=message)).get()
-
- return HexStr(
- await self.ccip_send(CCIPSendArgs(dest_chain_selector=dest_chain_selector, message=message)).execute(
- wallet, value=fee_amount
- )
- )
-
- async def find_message_id(self, tx_hash: HexStr) -> HexStr | None:
- CCIP_EXPLORER_QUERY_URL = "https://ccip.chain.link/api/h/atlas/search?msgIdOrTxnHash="
-
- async with httpx.AsyncClient() as client:
- response = await client.get(f"{CCIP_EXPLORER_QUERY_URL}{tx_hash}")
- if response.status_code != 200:
- raise Exception(f"Failed to get CCIP message status: {response.status_code}")
-
- status_response = CCIPMessageStatusResponse.model_validate(response.json())
-
- if status_response.transaction_hash:
- return status_response.transaction_hash[0].message_id
- return None
-
- async def check_status(self, tx_hash: HexStr) -> CCIPMessageResponse:
- CCIP_EXPLORER_URL = "https://ccip.chain.link/api/h/atlas/message/"
-
- async with httpx.AsyncClient() as client:
- response = await client.get(f"{CCIP_EXPLORER_URL}{tx_hash}")
- if response.status_code != 200:
- raise Exception(f"Failed to get CCIP message status: {response.status_code}")
- return CCIPMessageResponse.model_validate(response.json())
diff --git a/src/talos/contracts/ccip/schema.py b/src/talos/contracts/ccip/schema.py
deleted file mode 100644
index 041292a1..00000000
--- a/src/talos/contracts/ccip/schema.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from datetime import datetime
-from typing import Any, Optional
-
-from eth_typing import HexAddress, HexStr
-from pydantic import BaseModel, ConfigDict
-from pydantic.alias_generators import to_camel
-
-
-class TransactionHashItem(BaseModel):
- message_id: HexStr
-
- model_config = ConfigDict(
- alias_generator=to_camel,
- populate_by_name=True,
- )
-
-
-class CCIPMessageStatusResponse(BaseModel):
- dest_transaction_hash: list[Any]
- transaction_hash: list[TransactionHashItem]
- message_id: list[Any]
-
- model_config = ConfigDict(
- alias_generator=to_camel,
- populate_by_name=True,
- )
-
-
-class TokenAmount(BaseModel):
- token: HexAddress
- amount: str
-
- model_config = ConfigDict(
- alias_generator=to_camel,
- populate_by_name=True,
- )
-
-
-class CCIPMessageResponse(BaseModel):
- message_id: HexStr
- state: Optional[Any] = None
- votes: Optional[Any] = None
- source_network_name: str
- dest_network_name: str
- commit_block_timestamp: Optional[Any] = None
- root: Optional[Any] = None
- send_finalized: Optional[Any] = None
- commit_store: HexAddress
- origin: HexAddress
- sequence_number: int
- sender: HexAddress
- receiver: HexAddress
- router_address: HexAddress
- onramp_address: HexAddress
- offramp_address: HexAddress
- dest_router_address: HexAddress
- send_transaction_hash: HexStr
- send_timestamp: datetime
- send_block: int
- send_log_index: int
- min: Optional[Any] = None
- max: Optional[Any] = None
- commit_transaction_hash: Optional[HexStr] = None
- commit_block_number: Optional[int] = None
- commit_log_index: Optional[int] = None
- arm: HexAddress
- bless_transaction_hash: Optional[HexStr] = None
- bless_block_number: Optional[int] = None
- bless_block_timestamp: Optional[datetime] = None
- bless_log_index: Optional[int] = None
- receipt_transaction_hash: Optional[HexStr] = None
- receipt_timestamp: Optional[datetime] = None
- receipt_block: Optional[int] = None
- receipt_log_index: Optional[int] = None
- receipt_finalized: Optional[Any] = None
- data: HexStr
- strict: bool
- nonce: int
- fee_token: HexAddress
- gas_limit: str
- fee_token_amount: str
- token_amounts: list[TokenAmount]
- info_raw: str
- fast_filled: bool
- permission_less_execution_threshold_seconds: int
-
- model_config = ConfigDict(
- alias_generator=to_camel,
- populate_by_name=True,
- )
diff --git a/src/talos/contracts/gmx/__init__.py b/src/talos/contracts/gmx/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/contracts/gmx/argument_parser.py b/src/talos/contracts/gmx/argument_parser.py
deleted file mode 100644
index d5595451..00000000
--- a/src/talos/contracts/gmx/argument_parser.py
+++ /dev/null
@@ -1,410 +0,0 @@
-import asyncio
-from typing import Any, Callable
-
-from eth_rpc.utils import to_checksum
-from eth_typing import ChecksumAddress
-from pydantic import BaseModel, Field, PrivateAttr
-
-from .getters.markets import Markets
-from .getters.prices import OraclePrices
-from .types import Market, TokenMetadata
-from .utils import determine_swap_route, get_tokens_address_dict, median
-
-
-class OrderArgumentParser(BaseModel):
- parameters_dict: dict[str, Any] = Field(default_factory=dict)
- is_increase: bool = Field(default=False)
- is_decrease: bool = Field(default=False)
- is_swap: bool = Field(default=False)
-
- markets: Markets = Field(default_factory=Markets)
-
- _required_keys: list[str] = PrivateAttr()
- _missing_base_key_methods: dict[str, Callable[[], Any]] = PrivateAttr()
-
- def model_post_init(self, __context: Any) -> None:
- if self.is_increase:
- self._required_keys = [
- "index_token_address",
- "market_key",
- "start_token_address",
- "collateral_address",
- "swap_path",
- "is_long",
- "size_delta_usd",
- "initial_collateral_delta",
- "slippage_percent",
- ]
-
- if self.is_decrease:
- self._required_keys = [
- "index_token_address",
- "market_key",
- "start_token_address",
- "collateral_address",
- "is_long",
- "size_delta_usd",
- "initial_collateral_delta",
- "slippage_percent",
- ]
-
- if self.is_swap:
- self._required_keys = [
- "start_token_address",
- "out_token_address",
- "initial_collateral_delta",
- "swap_path",
- "slippage_percent",
- ]
-
- self._missing_base_key_methods = {
- "start_token_address": self._handle_missing_start_token_address,
- "index_token_address": self._handle_missing_index_token_address,
- "market_key": self._handle_missing_market_key,
- "out_token_address": self._handle_missing_out_token_address,
- "collateral_address": self._handle_missing_collateral_address,
- "swap_path": self._handle_missing_swap_path,
- "is_long": self._handle_missing_is_long,
- "slippage_percent": self._handle_missing_slippage_percent,
- }
-
- async def process_parameters_dictionary(self, parameters_dict: dict[str, Any]) -> dict[str, Any]:
- await self.markets.load_info()
-
- missing_keys = self._determine_missing_keys(parameters_dict)
-
- self.parameters_dict = parameters_dict
-
- for missing_key in missing_keys:
- if missing_key in self._missing_base_key_methods:
- if asyncio.iscoroutinefunction(self._missing_base_key_methods[missing_key]):
- await self._missing_base_key_methods[missing_key]()
- else:
- self._missing_base_key_methods[missing_key]()
-
- if not self.is_swap:
- await self.calculate_missing_position_size_info_keys()
- await self._check_if_max_leverage_exceeded()
-
- if self.is_increase:
- if await self._calculate_initial_collateral_usd() < 2:
- raise Exception("Position size must be backed by >$2 of collateral!")
-
- await self._format_size_info()
-
- return self.parameters_dict
-
- def _determine_missing_keys(self, parameters_dict: dict[str, Any]) -> list[str]:
- """
- Compare keys in the supposed dictionary to a list of keys which are required to create an
- order
-
- Parameters
- ----------
- parameters_dict : dict
- user suppled dictionary of parameters to create order.
-
- """
- return [key for key in self._required_keys if key not in parameters_dict]
-
- async def _handle_missing_index_token_address(self) -> None:
- """
- Will trigger if index token address is missing. Can be determined if index token symbol is
- found, but will raise an exception if that cant be found either
- """
-
- try:
- token_symbol = self.parameters_dict["index_token_symbol"]
-
- # Exception for tickers api
- if token_symbol == "BTC":
- token_symbol = "WBTC.b"
- except KeyError:
- raise Exception("Index Token Address and Symbol not provided!")
-
- self.parameters_dict["index_token_address"] = self.find_key_by_symbol(
- await get_tokens_address_dict(), token_symbol
- )
-
- def _handle_missing_market_key(self) -> None:
- """
- Will trigger if market key is missing. Can be determined from index token address.
- """
-
- index_token_address = self.parameters_dict["index_token_address"]
-
- if index_token_address == "0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f":
- index_token_address = "0x47904963fc8b2340414262125aF798B9655E58Cd"
-
- # use the index token address to find the market key from get_available_markets
- self.parameters_dict["market_key"] = self.find_market_key_by_index_address(
- self.markets.info, index_token_address
- )
-
- async def _handle_missing_start_token_address(self) -> None:
- """
- Will trigger if start token address is missing. Can be determined if start token symbol is
- found, but will raise an exception if that cant be found either.
- """
- print("HANDLE MISSING START TOKEN ADDRESS")
-
- try:
- start_token_symbol = self.parameters_dict["start_token_symbol"]
-
- # Exception for tickers api
- if start_token_symbol == "BTC":
- self.parameters_dict["start_token_address"] = "0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f"
- return
-
- except KeyError:
- raise Exception("Start Token Address and Symbol not provided!")
-
- # search the known tokens for a contract address using the user supplied symbol
- self.parameters_dict["start_token_address"] = self.find_key_by_symbol(
- await get_tokens_address_dict(),
- start_token_symbol,
- )
- print("START TOKEN ADDRESS", self.parameters_dict["start_token_address"])
-
- async def _handle_missing_out_token_address(self) -> None:
- """
- Will trigger if start token address is missing. Can be determined if start token symbol is
- found, but will raise an exception if that cant be found either.
- """
-
- try:
- start_token_symbol = self.parameters_dict["out_token_symbol"]
- except KeyError:
- raise Exception("Out Token Address and Symbol not provided!")
-
- # search the known tokens for a contract address using the user supplied symbol
- self.parameters_dict["out_token_address"] = self.find_key_by_symbol(
- await get_tokens_address_dict(), start_token_symbol
- )
-
- async def _handle_missing_collateral_address(self) -> None:
- """
- Will trigger if collateral address is missing. Can be determined if collateral token symbol
- is found, but will raise an exception if that cant be found either
- """
-
- try:
- collateral_token_symbol = self.parameters_dict["collateral_token_symbol"]
-
- # Exception for tickers api
- if collateral_token_symbol == "BTC":
- self.parameters_dict["collateral_address"] = "0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f"
- return
- except KeyError:
- raise Exception("Collateral Token Address and Symbol not provided!")
-
- # search the known tokens for a contract address using the user supplied symbol
- collateral_str = self.find_key_by_symbol(await get_tokens_address_dict(), collateral_token_symbol)
- assert collateral_str is not None
-
- collateral_address = to_checksum(collateral_str)
-
- # check if the collateral token address can be used in the requested market
- if self._check_if_valid_collateral_for_market(collateral_address) and not self.is_swap:
- self.parameters_dict["collateral_address"] = collateral_address
-
- def _handle_missing_swap_path(self) -> None:
- """
- Will trigger if swap path is missing. If start token is the same collateral, no swap path is
- required but otherwise will use determine_swap_route to find the path from start token to
- collateral token
- """
-
- if self.is_swap:
- # function returns swap route as a list [0] and a bool if there is a multi swap [1]
- self.parameters_dict["swap_path"] = determine_swap_route(
- self.markets.info,
- self.parameters_dict["start_token_address"],
- self.parameters_dict["out_token_address"],
- )[0]
-
- # No Swap Path required to map
- elif self.parameters_dict["start_token_address"] == self.parameters_dict["collateral_address"]:
- self.parameters_dict["swap_path"] = []
-
- else:
- # function returns swap route as a list [0] and a bool if there is a multi swap [1]
- self.parameters_dict["swap_path"] = determine_swap_route(
- self.markets.info,
- self.parameters_dict["start_token_address"],
- self.parameters_dict["collateral_address"],
- )[0]
-
- def _handle_missing_is_long(self) -> None:
- """
- Will trigger if is_long is missing from parameters dictionary, is_long must be supplied by
- user
- """
-
- raise Exception("Please indiciate if position is_long!")
-
- def _handle_missing_slippage_percent(self) -> None:
- """
- Will trigger if slippage is missing from parameters dictionary, slippage must be supplied by
- user
- """
-
- raise Exception("Please indiciate slippage!")
-
- def _check_if_valid_collateral_for_market(self, collateral_address: ChecksumAddress) -> bool:
- """
- Check is collateral address is valid in the requested market.
- A collateral token is only valid if it is the long or short token of the market.
-
- Parameters
- ----------
- collateral_address : str
- address of collateral token.
- """
-
- market_key = self.parameters_dict["market_key"]
-
- if self.parameters_dict["market_key"] == "0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f":
- market_key = "0x47c031236e19d024b42f8AE6780E44A573170703"
-
- assert self.markets.info is not None
- market = self.markets.info[market_key]
-
- if collateral_address == market.long_token_address or collateral_address == market.short_token_address:
- return True
- raise Exception("Not a valid collateral for selected market!")
-
- @staticmethod
- def find_key_by_symbol(input_dict: dict[ChecksumAddress, TokenMetadata], search_symbol: str) -> str | None:
- """
- For a given token symbol, identify that key in input_dict that matches the value for
- 'symbol'
- """
-
- for key, value in input_dict.items():
- if value.symbol == search_symbol:
- return key
- raise Exception('"{}" not a known token for GMX v2!'.format(search_symbol))
-
- @staticmethod
- def find_market_key_by_index_address(
- input_dict: dict[ChecksumAddress, Market], index_token_address: ChecksumAddress
- ) -> ChecksumAddress | None:
- """
- For a given index token address, identify that key in input_dict that matches the value for
- 'index_token_address'
- """
-
- for key, value in input_dict.items():
- if value.index_token_address == index_token_address:
- return key
- return None
-
- async def calculate_missing_position_size_info_keys(self) -> dict[str, Any]:
- """
- Look at combinations of size_delta_usd_delta, intial_collateral_delta, and leverage and
- see if any missing required parameters can be calculated.
- """
-
- # Both size_delta_usd and initial_collateral_delta have been suppled, no issue
- if "size_delta_usd" in self.parameters_dict and "initial_collateral_delta" in self.parameters_dict:
- return self.parameters_dict
-
- # leverage and initial_collateral_delta supplied, we can calculate size_delta_usd if missing
- elif (
- "leverage" in self.parameters_dict
- and "initial_collateral_delta" in self.parameters_dict
- and "size_delta_usd" not in self.parameters_dict
- ):
- initial_collateral_delta_usd = await self._calculate_initial_collateral_usd()
-
- self.parameters_dict["size_delta_usd"] = self.parameters_dict["leverage"] * initial_collateral_delta_usd
- return self.parameters_dict
-
- # size_delta_usd and leverage supplied, we can calculate initial_collateral_delta if missing
- elif (
- "size_delta_usd" in self.parameters_dict
- and "leverage" in self.parameters_dict
- and "initial_collateral_delta" not in self.parameters_dict
- ):
- collateral_usd = self.parameters_dict["size_delta_usd"] / self.parameters_dict["leverage"]
-
- self.parameters_dict["initial_collateral_delta"] = await self._calculate_initial_collateral_tokens(
- collateral_usd
- )
-
- return self.parameters_dict
- else:
- potential_missing_keys = '"size_delta_usd", "initial_collateral_delta", or "leverage"!'
- raise Exception(
- "Required keys are missing or provided incorrectly, please check: {}".format(potential_missing_keys)
- )
-
- async def _calculate_initial_collateral_usd(self) -> float:
- """
- Calculate the USD value of the number of tokens supplied in initial collateral delta
- """
-
- initial_collateral_delta_amount = self.parameters_dict["initial_collateral_delta"]
- prices = await OraclePrices().get_recent_prices()
- price: float = median(
- [
- float(prices[self.parameters_dict["start_token_address"]].max_price_full),
- float(prices[self.parameters_dict["start_token_address"]].min_price_full),
- ]
- )
- address_dict = await get_tokens_address_dict()
- oracle_factor: int = address_dict[self.parameters_dict["start_token_address"]].decimals - 30
-
- return float((price * 10**oracle_factor) * initial_collateral_delta_amount)
-
- async def _calculate_initial_collateral_tokens(self, collateral_usd: float) -> float:
- """
- Calculate the amount of tokens collateral from the USD value
- """
-
- prices = await OraclePrices().get_recent_prices()
- price = median(
- [
- float(prices[self.parameters_dict["start_token_address"]].max_price_full),
- float(prices[self.parameters_dict["start_token_address"]].min_price_full),
- ]
- )
- address_dict = await get_tokens_address_dict()
- oracle_factor = address_dict[self.parameters_dict["start_token_address"]].decimals - 30
-
- return float(collateral_usd / (price * 10**oracle_factor))
-
- async def _format_size_info(self) -> None:
- """
- Convert size_delta and initial_collateral_delta to significant figures which will be
- accepted on chain
- """
-
- if not self.is_swap:
- # All USD numbers need to be 10**30
- self.parameters_dict["size_delta"] = int(self.parameters_dict["size_delta_usd"] * 10**30)
-
- # Each token has its a specific decimal factor that needs to be applied
- token_address_dict = await get_tokens_address_dict()
- decimal = token_address_dict[self.parameters_dict["start_token_address"]].decimals
- print("decimal", decimal)
- print('self.parameters_dict["initial_collateral_delta"]', self.parameters_dict["initial_collateral_delta"])
- self.parameters_dict["initial_collateral_delta"] = int(
- self.parameters_dict["initial_collateral_delta"] * 10**decimal
- )
- print('self.parameters_dict["initial_collateral_delta"]', self.parameters_dict["initial_collateral_delta"])
-
- async def _check_if_max_leverage_exceeded(self) -> None:
- """
- Using collateral tokens and size_delta calculate the requested leverage size and raise
- exception if this exceeds x100.
- """
-
- collateral_usd_value = await self._calculate_initial_collateral_usd()
- leverage_requested = self.parameters_dict["size_delta_usd"] / collateral_usd_value
-
- # TODO - leverage is now a contract parameter and needs to be queried
- max_leverage = 100
- if leverage_requested > max_leverage:
- raise Exception('Leverage requested "x{:.2f}" can not exceed x100!'.format(leverage_requested))
diff --git a/src/talos/contracts/gmx/constants/__init__.py b/src/talos/contracts/gmx/constants/__init__.py
deleted file mode 100644
index 7af0742a..00000000
--- a/src/talos/contracts/gmx/constants/__init__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from eth_typing import ChecksumAddress, HexAddress, HexStr
-
-PRECISION = 30
-DATASTORE_ADDRESS = HexAddress(HexStr("0xFD70de6b91282D8017aA4E741e9Ae325CAb992d8"))
-SYNTHETICS_ROUTER_CONTRACT_ADDRESS = HexAddress(HexStr("0x7452c558d45f8afC8c83dAe62C3f8A5BE19c71f6"))
-EXCHANGE_ROUTER_ADDRESS = HexAddress(HexStr("0x87d66368cD08a7Ca42252f5ab44B2fb6d1Fb8d15"))
-ORDER_VAULT_ADDRESS = HexAddress(HexStr("0x31eF83a530Fde1B38EE9A18093A333D8Bbbc40D5"))
-
-USDC_ADDRESS = HexAddress(HexStr("0xaf88d065e77c8cC2239327C5EDb3A432268e5831"))
-WETH_ADDRESS = ChecksumAddress(HexAddress(HexStr("0x82aF49447D8a07e3bd95BD0d56f35241523fBab1")))
-
-CONTRACT_MAP = {
- "arbitrum": {
- "datastore": {
- "contract_address": "0xFD70de6b91282D8017aA4E741e9Ae325CAb992d8",
- "abi_path": "contracts/arbitrum/datastore.json",
- },
- "eventemitter": {
- "contract_address": "0xC8ee91A54287DB53897056e12D9819156D3822Fb",
- "abi_path": "contracts/arbitrum/eventemitter.json",
- },
- "exchangerouter": {
- "contract_address": "0x900173A66dbD345006C51fA35fA3aB760FcD843b",
- "abi_path": "contracts/arbitrum/exchangerouter.json",
- },
- "depositvault": {
- "contract_address": "0xF89e77e8Dc11691C9e8757e84aaFbCD8A67d7A55",
- "abi_path": "contracts/arbitrum/depositvault.json",
- },
- "withdrawalvault": {
- "contract_address": "0x0628D46b5D145f183AdB6Ef1f2c97eD1C4701C55",
- "abi_path": "contracts/arbitrum/withdrawalvault.json",
- },
- "ordervault": {
- "contract_address": "0x31eF83a530Fde1B38EE9A18093A333D8Bbbc40D5",
- "abi_path": "contracts/arbitrum/ordervault.json",
- },
- "syntheticsreader": {
- "contract_address": "0x5Ca84c34a381434786738735265b9f3FD814b824",
- "abi_path": "contracts/arbitrum/syntheticsreader.json",
- },
- "syntheticsrouter": {
- "contract_address": "0x7452c558d45f8afC8c83dAe62C3f8A5BE19c71f6",
- "abi_path": "contracts/arbitrum/syntheticsrouter.json",
- },
- "glvreader": {
- "contract_address": "0xd4f522c4339Ae0A90a156bd716715547e44Bed65",
- "abi_path": "contracts/arbitrum/glvreader.json",
- },
- },
- "avalanche": {
- "datastore": {
- "contract_address": "0x2F0b22339414ADeD7D5F06f9D604c7fF5b2fe3f6",
- "abi_path": "contracts/avalanche/datastore.json",
- },
- "eventemitter": {
- "contract_address": "0xDb17B211c34240B014ab6d61d4A31FA0C0e20c26",
- "abi_path": "contracts/avalanche/eventemitter.json",
- },
- "exchangerouter": {
- "contract_address": "0x2b76df209E1343da5698AF0f8757f6170162e78b",
- "abi_path": "contracts/avalanche/exchangerouter.json",
- },
- "depositvault": {
- "contract_address": "0x90c670825d0C62ede1c5ee9571d6d9a17A722DFF",
- "abi_path": "contracts/avalanche/depositvault.json",
- },
- "withdrawalvault": {
- "contract_address": "0xf5F30B10141E1F63FC11eD772931A8294a591996",
- "abi_path": "contracts/avalanche/withdrawalvault.json",
- },
- "ordervault": {
- "contract_address": "0xD3D60D22d415aD43b7e64b510D86A30f19B1B12C",
- "abi_path": "contracts/avalanche/ordervault.json",
- },
- "syntheticsreader": {
- "contract_address": "0xBAD04dDcc5CC284A86493aFA75D2BEb970C72216",
- "abi_path": "contracts/avalanche/syntheticsreader.json",
- },
- "syntheticsrouter": {
- "contract_address": "0x820F5FfC5b525cD4d88Cd91aCf2c28F16530Cc68",
- "abi_path": "contracts/avalanche/syntheticsrouter.json",
- },
- },
-}
diff --git a/src/talos/contracts/gmx/contracts/__init__.py b/src/talos/contracts/gmx/contracts/__init__.py
deleted file mode 100644
index 33d1174a..00000000
--- a/src/talos/contracts/gmx/contracts/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from .datastore import Datastore, datastore
-from .synthetics_reader import SyntheticsReader, synthetics_reader
-from .exchange_router import ExchangeRouter, exchange_router
-
-
-__all__ = [
- "Datastore",
- "ExchangeRouter",
- "SyntheticsReader",
- "datastore",
- "exchange_router",
- "synthetics_reader",
-]
diff --git a/src/talos/contracts/gmx/contracts/datastore.py b/src/talos/contracts/gmx/contracts/datastore.py
deleted file mode 100644
index fdaf27bf..00000000
--- a/src/talos/contracts/gmx/contracts/datastore.py
+++ /dev/null
@@ -1,612 +0,0 @@
-from typing import Annotated, cast
-
-from eth_rpc import ProtocolBase, ContractFunc
-from eth_rpc.networks import Arbitrum
-from eth_rpc.types import primitives, Name, NoArgs
-
-from ..constants import DATASTORE_ADDRESS
-
-
-class Datastore(ProtocolBase):
- add_address: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.address],
- None
- ],
- Name("addAddress"),
- ]
-
- add_bytes32: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.bytes32],
- None
- ],
- Name("addBytes32"),
- ]
-
- add_uint: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- None
- ],
- Name("addUint"),
- ]
-
- address_array_values: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.address
- ],
- Name("addressArrayValues"),
- ]
-
- address_values: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.address
- ],
- Name("addressValues"),
- ]
-
- apply_bounded_delta_to_uint: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.int256],
- primitives.uint256
- ],
- Name("applyBoundedDeltaToUint"),
- ]
-
- apply_delta_to_int: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.int256],
- primitives.int256
- ],
- Name("applyDeltaToInt"),
- ]
-
- def apply_delta_to_uint(
- self,
- arg1: primitives.bytes32,
- arg2: primitives.int256 | primitives.uint256,
- arg3: primitives.string | None = None,
- ) -> ContractFunc[
- tuple[primitives.bytes32, primitives.int256, primitives.string] | tuple[primitives.bytes32, primitives.uint256],
- primitives.uint256,
- ]:
- if arg3 is None:
- arg2 = cast(primitives.uint256, arg2)
- return self.apply_delta_to_uint_2((arg1, arg2))
- arg2 = cast(primitives.int256, arg2)
- return self.apply_delta_to_uint_1((arg1, arg2, arg3))
-
- apply_delta_to_uint_1: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.int256, primitives.string],
- primitives.uint256,
- ],
- Name("applyDeltaToUint"),
- ]
-
- apply_delta_to_uint_2: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.uint256,
- ],
- Name("applyDeltaToUint"),
- ]
-
- bool_array_values: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- bool
- ],
- Name("boolArrayValues"),
- ]
-
- bool_values: Annotated[
- ContractFunc[
- primitives.bytes32,
- bool
- ],
- Name("boolValues"),
- ]
-
- bytes32_array_values: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.bytes32
- ],
- Name("bytes32ArrayValues"),
- ]
-
- bytes32_values: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.bytes32
- ],
- Name("bytes32Values"),
- ]
-
- contains_address: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.address],
- bool
- ],
- Name("containsAddress"),
- ]
-
- contains_bytes32: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.bytes32],
- bool
- ],
- Name("containsBytes32"),
- ]
-
- contains_uint: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- bool
- ],
- Name("containsUint"),
- ]
-
- decrement_int: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.int256],
- primitives.int256
- ],
- Name("decrementInt"),
- ]
-
- decrement_uint: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.uint256
- ],
- Name("decrementUint"),
- ]
-
- get_address: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.address
- ],
- Name("getAddress"),
- ]
-
- get_address_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- list[primitives.address]
- ],
- Name("getAddressArray"),
- ]
-
- get_address_count: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.uint256
- ],
- Name("getAddressCount"),
- ]
-
- get_address_values_at: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256, primitives.uint256],
- list[primitives.address]
- ],
- Name("getAddressValuesAt"),
- ]
-
- get_bool: Annotated[
- ContractFunc[
- primitives.bytes32,
- bool
- ],
- Name("getBool"),
- ]
-
- get_bool_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- list[bool]
- ],
- Name("getBoolArray"),
- ]
-
- get_bytes32: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.bytes32
- ],
- Name("getBytes32"),
- ]
-
- get_bytes32_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- list[primitives.bytes32]
- ],
- Name("getBytes32Array"),
- ]
-
- get_bytes32_count: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.uint256
- ],
- Name("getBytes32Count"),
- ]
-
- get_bytes32_values_at: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256, primitives.uint256],
- list[primitives.bytes32]
- ],
- Name("getBytes32ValuesAt"),
- ]
-
- get_int: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.int256
- ],
- Name("getInt"),
- ]
-
- get_int_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- list[primitives.int256]
- ],
- Name("getIntArray"),
- ]
-
- get_string: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.string
- ],
- Name("getString"),
- ]
-
- get_string_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- list[primitives.string]
- ],
- Name("getStringArray"),
- ]
-
- get_uint: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.uint256
- ],
- Name("getUint"),
- ]
-
- get_uint_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- list[primitives.uint256]
- ],
- Name("getUintArray"),
- ]
-
- get_uint_count: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.uint256
- ],
- Name("getUintCount"),
- ]
-
- get_uint_values_at: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256, primitives.uint256],
- list[primitives.uint256]
- ],
- Name("getUintValuesAt"),
- ]
-
- increment_int: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.int256],
- primitives.int256
- ],
- Name("incrementInt"),
- ]
-
- increment_uint: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.uint256
- ],
- Name("incrementUint"),
- ]
-
- int_array_values: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.int256
- ],
- Name("intArrayValues"),
- ]
-
- int_values: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.int256
- ],
- Name("intValues"),
- ]
-
- remove_address: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.address],
- None
- ],
- Name("removeAddress"),
- ]
-
- remove_address_2: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeAddress"),
- ]
-
- remove_address_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeAddressArray"),
- ]
-
- remove_bool: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeBool"),
- ]
-
- remove_bool_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeBoolArray"),
- ]
-
- remove_bytes32: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.bytes32],
- None
- ],
- Name("removeBytes32"),
- ]
-
- remove_bytes32_2: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeBytes32"),
- ]
-
- remove_bytes32_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeBytes32Array"),
- ]
-
- remove_int: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeInt"),
- ]
-
- remove_int_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeIntArray"),
- ]
-
- remove_string: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeString"),
- ]
-
- remove_string_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeStringArray"),
- ]
-
- def remove_uint(
- self,
- arg1: primitives.bytes32,
- arg2: primitives.uint256 | None = None,
- ) -> ContractFunc[primitives.bytes32 | tuple[primitives.bytes32, primitives.uint256], None]:
- if arg2 is None:
- return self.remove_uint_1(arg1)
- return self.remove_uint_2((arg1, arg2))
-
- remove_uint_1: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeUint"),
- ]
-
- remove_uint_2: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- None
- ],
- Name("removeUint"),
- ]
-
- remove_uint_array: Annotated[
- ContractFunc[
- primitives.bytes32,
- None
- ],
- Name("removeUintArray"),
- ]
-
- role_store: Annotated[
- ContractFunc[
- NoArgs,
- primitives.address
- ],
- Name("roleStore"),
- ]
-
- set_address: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.address],
- primitives.address
- ],
- Name("setAddress"),
- ]
-
- set_address_array: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, list[primitives.address]],
- None
- ],
- Name("setAddressArray"),
- ]
-
- set_bool: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, bool],
- bool
- ],
- Name("setBool"),
- ]
-
- set_bool_array: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, list[bool]],
- None
- ],
- Name("setBoolArray"),
- ]
-
- set_bytes32: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.bytes32],
- primitives.bytes32
- ],
- Name("setBytes32"),
- ]
-
- set_bytes32_array: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, list[primitives.bytes32]],
- None
- ],
- Name("setBytes32Array"),
- ]
-
- set_int: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.int256],
- primitives.int256
- ],
- Name("setInt"),
- ]
-
- set_int_array: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, list[primitives.int256]],
- None
- ],
- Name("setIntArray"),
- ]
-
- set_string: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.string],
- primitives.string
- ],
- Name("setString"),
- ]
-
- set_string_array: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, list[primitives.string]],
- None
- ],
- Name("setStringArray"),
- ]
-
- set_uint: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.uint256
- ],
- Name("setUint"),
- ]
-
- set_uint_array: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, list[primitives.uint256]],
- None
- ],
- Name("setUintArray"),
- ]
-
- string_array_values: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.string
- ],
- Name("stringArrayValues"),
- ]
-
- string_values: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.string
- ],
- Name("stringValues"),
- ]
-
- uint_array_values: Annotated[
- ContractFunc[
- tuple[primitives.bytes32, primitives.uint256],
- primitives.uint256
- ],
- Name("uintArrayValues"),
- ]
-
- uint_values: Annotated[
- ContractFunc[
- primitives.bytes32,
- primitives.uint256
- ],
- Name("uintValues"),
- ]
-
-
-datastore = Datastore[Arbitrum](address=DATASTORE_ADDRESS)
diff --git a/src/talos/contracts/gmx/contracts/exchange_router.py b/src/talos/contracts/gmx/contracts/exchange_router.py
deleted file mode 100644
index a20825a9..00000000
--- a/src/talos/contracts/gmx/contracts/exchange_router.py
+++ /dev/null
@@ -1,286 +0,0 @@
-from typing import Annotated
-
-from eth_rpc import ContractFunc, ProtocolBase
-from eth_rpc.networks import Arbitrum
-from eth_rpc.types import Name, NoArgs, Struct, primitives
-from eth_typing import HexAddress
-
-from ..constants import EXCHANGE_ROUTER_ADDRESS
-
-
-class Props(Struct):
- min: primitives.uint256
- max: primitives.uint256
-
-
-class CreateOrderParamsNumbers(Struct):
- size_delta_usd: Annotated[primitives.uint256, Name("sizeDeltaUsd")]
- initial_collateral_delta_amount: Annotated[primitives.uint256, Name("initialCollateralDeltaAmount")]
- trigger_price: Annotated[primitives.uint256, Name("triggerPrice")]
- acceptable_price: Annotated[primitives.uint256, Name("acceptablePrice")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
- min_output_amount: Annotated[primitives.uint256, Name("minOutputAmount")]
- valid_from_time: Annotated[primitives.uint256, Name("validFromTime")]
-
-
-class CreateOrderParamsAddresses(Struct):
- receiver: HexAddress
- cancellation_receiver: Annotated[HexAddress, Name("cancellationReceiver")]
- callback_contract: Annotated[HexAddress, Name("callbackContract")]
- ui_fee_receiver: Annotated[HexAddress, Name("uiFeeReceiver")]
- market: HexAddress
- initial_collateral_token: Annotated[HexAddress, Name("initialCollateralToken")]
- swap_path: Annotated[list[HexAddress], Name("swapPath")]
-
-
-class SimulatePricesParams(Struct):
- primary_tokens: Annotated[list[primitives.address], Name("primaryTokens")]
- primary_prices: Annotated[list["Props"], Name("primaryPrices")]
- min_timestamp: Annotated[primitives.uint256, Name("minTimestamp")]
- max_timestamp: Annotated[primitives.uint256, Name("maxTimestamp")]
-
-
-class SetPricesParams(Struct):
- tokens: list[primitives.address]
- providers: list[primitives.address]
- data: list["bytes"]
-
-
-class CreateWithdrawalParams(Struct):
- receiver: primitives.address
- callback_contract: Annotated[primitives.address, Name("callbackContract")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- market: primitives.address
- long_token_swap_path: Annotated[list[primitives.address], Name("longTokenSwapPath")]
- short_token_swap_path: Annotated[list[primitives.address], Name("shortTokenSwapPath")]
- min_long_token_amount: Annotated[primitives.uint256, Name("minLongTokenAmount")]
- min_short_token_amount: Annotated[primitives.uint256, Name("minShortTokenAmount")]
- should_unwrap_native_token: Annotated[bool, Name("shouldUnwrapNativeToken")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
-
-
-class CreateShiftParams(Struct):
- receiver: primitives.address
- callback_contract: Annotated[primitives.address, Name("callbackContract")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- from_market: Annotated[primitives.address, Name("fromMarket")]
- to_market: Annotated[primitives.address, Name("toMarket")]
- min_market_tokens: Annotated[primitives.uint256, Name("minMarketTokens")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
-
-
-class CreateOrderParams(Struct):
- addresses: CreateOrderParamsAddresses
- numbers: CreateOrderParamsNumbers
- order_type: Annotated[primitives.uint8, Name("orderType")]
- decrease_position_swap_type: Annotated[primitives.uint8, Name("decreasePositionSwapType")]
- is_long: Annotated[bool, Name("isLong")]
- should_unwrap_native_token: Annotated[bool, Name("shouldUnwrapNativeToken")]
- auto_cancel: Annotated[bool, Name("autoCancel")]
- referral_code: Annotated[primitives.bytes32, Name("referralCode")]
- data_list: Annotated[list[primitives.bytes32], Name("dataList")]
-
-
-class CreateDepositParams(Struct):
- receiver: primitives.address
- callback_contract: Annotated[primitives.address, Name("callbackContract")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- market: primitives.address
- initial_long_token: Annotated[primitives.address, Name("initialLongToken")]
- initial_short_token: Annotated[primitives.address, Name("initialShortToken")]
- long_token_swap_path: Annotated[list[primitives.address], Name("longTokenSwapPath")]
- short_token_swap_path: Annotated[list[primitives.address], Name("shortTokenSwapPath")]
- min_market_tokens: Annotated[primitives.uint256, Name("minMarketTokens")]
- should_unwrap_native_token: Annotated[bool, Name("shouldUnwrapNativeToken")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
-
-
-class ExchangeRouter(ProtocolBase):
- cancel_deposit: Annotated[
- ContractFunc[primitives.bytes32, None],
- Name("cancelDeposit"),
- ]
-
- cancel_order: Annotated[
- ContractFunc[primitives.bytes32, None],
- Name("cancelOrder"),
- ]
-
- cancel_shift: Annotated[
- ContractFunc[primitives.bytes32, None],
- Name("cancelShift"),
- ]
-
- cancel_withdrawal: Annotated[
- ContractFunc[primitives.bytes32, None],
- Name("cancelWithdrawal"),
- ]
-
- claim_affiliate_rewards: Annotated[
- ContractFunc[
- tuple[list[primitives.address], list[primitives.address], primitives.address], list[primitives.uint256]
- ],
- Name("claimAffiliateRewards"),
- ]
-
- claim_collateral: Annotated[
- ContractFunc[
- tuple[list[primitives.address], list[primitives.address], list[primitives.uint256], primitives.address],
- list[primitives.uint256],
- ],
- Name("claimCollateral"),
- ]
-
- claim_funding_fees: Annotated[
- ContractFunc[
- tuple[list[primitives.address], list[primitives.address], primitives.address], list[primitives.uint256]
- ],
- Name("claimFundingFees"),
- ]
-
- claim_ui_fees: Annotated[
- ContractFunc[
- tuple[list[primitives.address], list[primitives.address], primitives.address], list[primitives.uint256]
- ],
- Name("claimUiFees"),
- ]
-
- create_deposit: Annotated[
- ContractFunc[CreateDepositParams, primitives.bytes32],
- Name("createDeposit"),
- ]
-
- create_order: Annotated[
- ContractFunc[CreateOrderParams, primitives.bytes32],
- Name("createOrder"),
- ]
-
- create_shift: Annotated[
- ContractFunc[CreateShiftParams, primitives.bytes32],
- Name("createShift"),
- ]
-
- create_withdrawal: Annotated[
- ContractFunc[CreateWithdrawalParams, primitives.bytes32],
- Name("createWithdrawal"),
- ]
-
- data_store: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("dataStore"),
- ]
-
- deposit_handler: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("depositHandler"),
- ]
-
- event_emitter: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("eventEmitter"),
- ]
-
- execute_atomic_withdrawal: Annotated[
- ContractFunc[tuple[CreateWithdrawalParams, SetPricesParams], None],
- Name("executeAtomicWithdrawal"),
- ]
-
- external_handler: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("externalHandler"),
- ]
-
- make_external_calls: Annotated[
- ContractFunc[
- tuple[list[primitives.address], list[bytes], list[primitives.address], list[primitives.address]], None
- ],
- Name("makeExternalCalls"),
- ]
-
- multicall: ContractFunc[ # type: ignore
- list[bytes], list[bytes]
- ]
-
- order_handler: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("orderHandler"),
- ]
-
- role_store: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("roleStore"),
- ]
-
- router: ContractFunc[NoArgs, primitives.address]
-
- send_native_token: Annotated[
- ContractFunc[tuple[primitives.address, primitives.uint256], None],
- Name("sendNativeToken"),
- ]
-
- send_tokens: Annotated[
- ContractFunc[tuple[HexAddress, HexAddress, primitives.uint256], None],
- Name("sendTokens"),
- ]
-
- send_wnt: Annotated[
- ContractFunc[tuple[HexAddress, primitives.uint256], None],
- Name("sendWnt"),
- ]
-
- set_saved_callback_contract: Annotated[
- ContractFunc[tuple[primitives.address, primitives.address], None],
- Name("setSavedCallbackContract"),
- ]
-
- set_ui_fee_factor: Annotated[
- ContractFunc[primitives.uint256, None],
- Name("setUiFeeFactor"),
- ]
-
- shift_handler: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("shiftHandler"),
- ]
-
- simulate_execute_deposit: Annotated[
- ContractFunc[tuple[primitives.bytes32, SimulatePricesParams], None],
- Name("simulateExecuteDeposit"),
- ]
-
- simulate_execute_order: Annotated[
- ContractFunc[tuple[primitives.bytes32, SimulatePricesParams], None],
- Name("simulateExecuteOrder"),
- ]
-
- simulate_execute_shift: Annotated[
- ContractFunc[tuple[primitives.bytes32, SimulatePricesParams], None],
- Name("simulateExecuteShift"),
- ]
-
- simulate_execute_withdrawal: Annotated[
- ContractFunc[tuple[primitives.bytes32, SimulatePricesParams, primitives.uint8], None],
- Name("simulateExecuteWithdrawal"),
- ]
-
- update_order: Annotated[
- ContractFunc[
- tuple[
- primitives.bytes32, primitives.uint256, primitives.uint256, primitives.uint256, primitives.uint256, bool
- ],
- None,
- ],
- Name("updateOrder"),
- ]
-
- withdrawal_handler: Annotated[
- ContractFunc[NoArgs, primitives.address],
- Name("withdrawalHandler"),
- ]
-
-
-exchange_router = ExchangeRouter[Arbitrum](address=EXCHANGE_ROUTER_ADDRESS)
diff --git a/src/talos/contracts/gmx/contracts/synthetics_reader/__init__.py b/src/talos/contracts/gmx/contracts/synthetics_reader/__init__.py
deleted file mode 100644
index ef47db43..00000000
--- a/src/talos/contracts/gmx/contracts/synthetics_reader/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from eth_rpc.networks import Arbitrum
-
-from .reader import SyntheticsReader
-from .schemas import (
- DepositAmountOutParams,
- ExecutionPriceParams,
- GetMarketParams,
- GetMarketsParams,
- GetOpenInterestParams,
- GetPnlParams,
-)
-from .types import (
- MarketProps,
- MarketUtilsMarketPrices,
- OrderProps,
- PositionProps,
- PriceProps,
- ReaderPricingUtilsExecutionPriceResult,
- ReaderUtilsMarketInfo,
- ReaderUtilsPositionInfo,
-)
-
-synthetics_reader = SyntheticsReader[Arbitrum](address="0x5Ca84c34a381434786738735265b9f3FD814b824")
-
-
-__all__ = [
- "DepositAmountOutParams",
- "ExecutionPriceParams",
- "GetMarketsParams",
- "GetOpenInterestParams",
- "GetPnlParams",
- "MarketProps",
- "ReaderUtilsMarketInfo",
- "SyntheticsReader",
- "PositionProps",
- "OrderProps",
- "PriceProps",
- "ReaderUtilsPositionInfo",
- "ReaderPricingUtilsExecutionPriceResult",
- "reader_contract",
- "SyntheticsReader",
- "GetMarketParams",
- "MarketUtilsMarketPrices",
- "PositionProps",
- "OrderProps",
- "PriceProps",
- "ReaderUtilsPositionInfo",
- "ReaderPricingUtilsExecutionPriceResult",
- "reader_contract",
-]
diff --git a/src/talos/contracts/gmx/contracts/synthetics_reader/enums.py b/src/talos/contracts/gmx/contracts/synthetics_reader/enums.py
deleted file mode 100644
index bb104b75..00000000
--- a/src/talos/contracts/gmx/contracts/synthetics_reader/enums.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from enum import IntEnum
-
-
-class SwapPricingType(IntEnum):
- Swap = 0
- Shift = 1
- Atomic = 2
- Deposit = 3
- Withdrawal = 4
-
-
-class OrderType(IntEnum):
- MarketSwap = 0
- LimitSwap = 1
- MarketIncrease = 2
- LimitIncrease = 3
- MarketDecrease = 4
- LimitDecrease = 5
- StopLossDecrease = 6
- Liquidation = 7
-
-
-class DecreasePositionSwapType(IntEnum):
- NoSwap = 0
- SwapPnlTokenToCollateralToken = 1
- SwapCollateralTokenToPnlToken = 2
diff --git a/src/talos/contracts/gmx/contracts/synthetics_reader/reader.py b/src/talos/contracts/gmx/contracts/synthetics_reader/reader.py
deleted file mode 100644
index 3786243d..00000000
--- a/src/talos/contracts/gmx/contracts/synthetics_reader/reader.py
+++ /dev/null
@@ -1,244 +0,0 @@
-from typing import Annotated
-
-from eth_typing import HexAddress
-
-from eth_rpc import ProtocolBase, ContractFunc
-from eth_rpc.types import primitives, Name
-
-from .schemas import (
- DepositAmountOutParams,
- ExecutionPriceParams,
- GetMarketsParams,
- GetMarketParams,
- GetOpenInterestParams,
- GetPnlParams,
- GetMarketTokenPriceParams,
- GetMarketTokenPriceResponse,
- SwapAmountOutParams,
- SwapAmountOutResponse,
- WithdrawalAmountOutParams,
- WithdrawalAmountOutResponse
-)
-from .types import (
- OrderProps,
- ReaderUtilsPositionInfo,
- ReaderPricingUtilsExecutionPriceResult,
- PositionProps,
- MarketProps,
- DepositProps,
- WithdrawalProps,
- ShiftProps,
- PriceProps,
- MarketUtilsMarketPrices,
- ReaderUtilsMarketInfo
-)
-
-
-class SyntheticsReader(ProtocolBase):
- get_account_orders: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.address, primitives.uint256, primitives.uint256],
- list[OrderProps]
- ],
- Name("getAccountOrders"),
- ]
-
- get_account_position_info_list: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.address, list[primitives.bytes32], list[MarketUtilsMarketPrices], primitives.address],
- list[ReaderUtilsPositionInfo]
- ],
- Name("getAccountPositionInfoList"),
- ]
-
- get_account_positions: Annotated[
- ContractFunc[
- tuple[HexAddress, HexAddress, primitives.uint256, primitives.uint256],
- list[PositionProps]
- ],
- Name("getAccountPositions"),
- ]
-
- get_adl_state: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.address, bool, MarketUtilsMarketPrices],
- tuple[primitives.uint256, bool, primitives.int256, primitives.uint256]
- ],
- Name("getAdlState"),
- ]
-
- get_deposit: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.bytes32],
- DepositProps
- ],
- Name("getDeposit"),
- ]
-
- get_deposit_amount_out: Annotated[
- ContractFunc[
- DepositAmountOutParams,
- primitives.uint256
- ],
- Name("getDepositAmountOut"),
- ]
-
- get_execution_price: Annotated[
- ContractFunc[
- ExecutionPriceParams,
- ReaderPricingUtilsExecutionPriceResult
- ],
- Name("getExecutionPrice"),
- ]
-
- get_market: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.address],
- MarketProps
- ],
- Name("getMarket"),
- ]
-
- get_market_by_salt: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.bytes32],
- MarketProps
- ],
- Name("getMarketBySalt"),
- ]
-
- get_market_info: Annotated[
- ContractFunc[
- GetMarketParams,
- ReaderUtilsMarketInfo
- ],
- Name("getMarketInfo"),
- ]
-
- get_market_info_list: Annotated[
- ContractFunc[
- tuple[primitives.address, list[MarketUtilsMarketPrices], primitives.uint256, primitives.uint256],
- list[ReaderUtilsMarketInfo]
- ],
- Name("getMarketInfoList"),
- ]
-
- get_market_token_price: Annotated[
- ContractFunc[
- GetMarketTokenPriceParams,
- GetMarketTokenPriceResponse,
- ],
- Name("getMarketTokenPrice"),
- ]
-
- get_markets: Annotated[
- ContractFunc[
- GetMarketsParams,
- list[MarketProps]
- ],
- Name("getMarkets"),
- ]
-
- get_net_pnl: Annotated[
- ContractFunc[
- tuple[primitives.address, MarketProps, PriceProps, bool],
- primitives.int256
- ],
- Name("getNetPnl"),
- ]
-
- get_open_interest_with_pnl: Annotated[
- ContractFunc[
- GetOpenInterestParams,
- primitives.int256
- ],
- Name("getOpenInterestWithPnl"),
- ]
-
- get_order: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.bytes32],
- OrderProps
- ],
- Name("getOrder"),
- ]
-
- get_pnl: Annotated[
- ContractFunc[
- GetPnlParams,
- primitives.int256
- ],
- Name("getPnl"),
- ]
-
- get_pnl_to_pool_factor: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.address, MarketUtilsMarketPrices, bool, bool],
- primitives.int256
- ],
- Name("getPnlToPoolFactor"),
- ]
-
- get_position: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.bytes32],
- PositionProps
- ],
- Name("getPosition"),
- ]
-
- get_position_info: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.address, primitives.bytes32, MarketUtilsMarketPrices, primitives.uint256, primitives.address, bool],
- ReaderUtilsPositionInfo
- ],
- Name("getPositionInfo"),
- ]
-
- get_position_pnl_usd: Annotated[
- ContractFunc[
- tuple[primitives.address, MarketProps, MarketUtilsMarketPrices, primitives.bytes32, primitives.uint256],
- tuple[primitives.int256, primitives.int256, primitives.uint256]
- ],
- Name("getPositionPnlUsd"),
- ]
-
- get_shift: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.bytes32],
- ShiftProps
- ],
- Name("getShift"),
- ]
-
- get_swap_amount_out: Annotated[
- ContractFunc[
- SwapAmountOutParams,
- SwapAmountOutResponse,
- ],
- Name("getSwapAmountOut"),
- ]
-
- get_swap_price_impact: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.address, primitives.address, primitives.address, primitives.uint256, PriceProps, PriceProps],
- tuple[primitives.int256, primitives.int256, primitives.int256]
- ],
- Name("getSwapPriceImpact"),
- ]
-
- get_withdrawal: Annotated[
- ContractFunc[
- tuple[primitives.address, primitives.bytes32],
- WithdrawalProps
- ],
- Name("getWithdrawal"),
- ]
-
- get_withdrawal_amount_out: Annotated[
- ContractFunc[
- WithdrawalAmountOutParams,
- WithdrawalAmountOutResponse,
- ],
- Name("getWithdrawalAmountOut"),
- ]
diff --git a/src/talos/contracts/gmx/contracts/synthetics_reader/schemas.py b/src/talos/contracts/gmx/contracts/synthetics_reader/schemas.py
deleted file mode 100644
index 958c737c..00000000
--- a/src/talos/contracts/gmx/contracts/synthetics_reader/schemas.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from typing import Annotated
-
-from eth_rpc.types import Name, primitives
-from eth_typing import HexAddress
-from pydantic import BaseModel
-
-from .enums import SwapPricingType
-from .types import MarketPoolValueInfoProps, MarketProps, MarketUtilsMarketPrices, PriceProps, SwapPricingUtilsSwapFees
-
-
-class ExecutionPriceParams(BaseModel):
- data_store: Annotated[HexAddress, Name("dataStore")]
- market_key: Annotated[HexAddress, Name("marketKey")]
- index_token_price: Annotated[PriceProps, Name("indexTokenPrice")]
- position_size_in_usd: Annotated[primitives.uint256, Name("positionSizeInUsd")]
- position_size_in_tokens: Annotated[primitives.uint256, Name("positionSizeInTokens")]
- size_delta_usd: Annotated[primitives.int256, Name("sizeDeltaUsd")]
- is_long: Annotated[bool, Name("isLong")]
-
-
-class SwapAmountOutParams(BaseModel):
- data_store: Annotated[primitives.address, Name("dataStore")]
- market: MarketProps
- prices: MarketUtilsMarketPrices
- token_in: primitives.address
- amount_in: primitives.address
- ui_fee_receiver: primitives.address
-
-
-class SwapAmountOutResponse(BaseModel):
- cache_amount_out: primitives.uint256
- impactAmount: primitives.uint256
- fees: SwapPricingUtilsSwapFees
-
-
-class DepositAmountOutParams(BaseModel):
- data_store: primitives.address
- market: MarketProps
- prices: MarketUtilsMarketPrices
- long_token_amount: primitives.uint256
- short_token_amount: primitives.uint256
- ui_fee_receiver: primitives.address
- swap_pricing_type: SwapPricingType
- include_virtual_inventory_impact: bool
-
-
-class WithdrawalAmountOutParams(BaseModel):
- data_store: primitives.address
- market: MarketProps
- prices: MarketUtilsMarketPrices
- market_token_amount: primitives.uint256
- ui_fee_receiver: primitives.address
- swap_pricing_type: SwapPricingType
-
-
-class WithdrawalAmountOutResponse(BaseModel):
- long_amount_after_fees: primitives.uint256
- short_amount_after_fees: primitives.uint256
-
-
-class GetMarketsParams(BaseModel):
- data_store: HexAddress
- start_index: primitives.uint256
- end_index: primitives.uint256
-
-
-class GetOpenInterestParams(BaseModel):
- data_store: HexAddress
- market: MarketProps
- index_token_price: PriceProps
- is_long: bool
- maximize: bool
-
-
-class GetPnlParams(BaseModel):
- data_store: HexAddress
- market: MarketProps
- index_token_price: PriceProps
- is_long: bool
- maximize: bool
-
-
-class GetMarketParams(BaseModel):
- data_store: HexAddress
- prices: MarketUtilsMarketPrices
- market_key: HexAddress
-
-
-class GetMarketTokenPriceParams(BaseModel):
- data_store: primitives.address
- market: MarketProps
- index_token_price: PriceProps
- long_token_price: PriceProps
- short_token_price: PriceProps
- pnl_factor_type: primitives.bytes32
- maximize: bool
-
-
-class GetMarketTokenPriceResponse(BaseModel):
- market_token_price: primitives.int256
- pool_value_info: MarketPoolValueInfoProps
diff --git a/src/talos/contracts/gmx/contracts/synthetics_reader/types.py b/src/talos/contracts/gmx/contracts/synthetics_reader/types.py
deleted file mode 100644
index daf4643d..00000000
--- a/src/talos/contracts/gmx/contracts/synthetics_reader/types.py
+++ /dev/null
@@ -1,306 +0,0 @@
-from typing import Annotated
-
-from eth_rpc.types import primitives, Name, Struct
-from eth_rpc.utils import to_checksum
-from pydantic import BeforeValidator
-
-class WithdrawalFlags(Struct):
- should_unwrap_native_token: Annotated[bool, Name("shouldUnwrapNativeToken")]
-
-
-class WithdrawalNumbers(Struct):
- market_token_amount: Annotated[primitives.uint256, Name("marketTokenAmount")]
- min_long_token_amount: Annotated[primitives.uint256, Name("minLongTokenAmount")]
- min_short_token_amount: Annotated[primitives.uint256, Name("minShortTokenAmount")]
- updated_at_block: Annotated[primitives.uint256, Name("updatedAtBlock")]
- updated_at_time: Annotated[primitives.uint256, Name("updatedAtTime")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
-
-
-class WithdrawalAddresses(Struct):
- account: primitives.address
- receiver: primitives.address
- callback_contract: Annotated[primitives.address, Name("callbackContract")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- market: primitives.address
- long_token_swap_path: Annotated[list[primitives.address], Name("longTokenSwapPath")]
- short_token_swap_path: Annotated[list[primitives.address], Name("shortTokenSwapPath")]
-
-
-class ShiftNumbers(Struct):
- market_token_amount: Annotated[primitives.uint256, Name("marketTokenAmount")]
- min_market_tokens: Annotated[primitives.uint256, Name("minMarketTokens")]
- updated_at_time: Annotated[primitives.uint256, Name("updatedAtTime")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
-
-
-class ShiftAddresses(Struct):
- account: primitives.address
- receiver: primitives.address
- callback_contract: Annotated[primitives.address, Name("callbackContract")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- from_market: Annotated[primitives.address, Name("fromMarket")]
- to_market: Annotated[primitives.address, Name("toMarket")]
-
-
-class ReaderUtilsVirtualInventory(Struct):
- virtual_pool_amount_for_long_token: Annotated[primitives.uint256, Name("virtualPoolAmountForLongToken")]
- virtual_pool_amount_for_short_token: Annotated[primitives.uint256, Name("virtualPoolAmountForShortToken")]
- virtual_inventory_for_positions: Annotated[primitives.int256, Name("virtualInventoryForPositions")]
-
-
-class MarketUtilsCollateralType(Struct):
- long_token: Annotated[primitives.uint256, Name("longToken")]
- short_token: Annotated[primitives.uint256, Name("shortToken")]
-
-
-class MarketUtilsPositionType(Struct):
- long: Annotated[MarketUtilsCollateralType, Name("long")]
- short: Annotated[MarketUtilsCollateralType, Name("short")]
-
-
-class MarketUtilsGetNextFundingAmountPerSizeResult(Struct):
- longs_pay_shorts: Annotated[bool, Name("longsPayShorts")]
- funding_factor_per_second: Annotated[primitives.uint256, Name("fundingFactorPerSecond")]
- next_saved_funding_factor_per_second: Annotated[primitives.int256, Name("nextSavedFundingFactorPerSecond")]
- funding_fee_amount_per_size_delta: Annotated[MarketUtilsPositionType, Name("fundingFeeAmountPerSizeDelta")]
- claimable_funding_amount_per_size_delta: Annotated[MarketUtilsPositionType, Name("claimableFundingAmountPerSizeDelta")]
-
-
-class ReaderUtilsBaseFundingValues(Struct):
- funding_fee_amount_per_size: Annotated[MarketUtilsPositionType, Name("fundingFeeAmountPerSize")]
- claimable_funding_amount_per_size: Annotated[MarketUtilsPositionType, Name("claimableFundingAmountPerSize")]
-
-
-class DepositFlags(Struct):
- should_unwrap_native_token: Annotated[bool, Name("shouldUnwrapNativeToken")]
-
-
-class DepositNumbers(Struct):
- initial_long_token_amount: Annotated[primitives.uint256, Name("initialLongTokenAmount")]
- initial_short_token_amount: Annotated[primitives.uint256, Name("initialShortTokenAmount")]
- min_market_tokens: Annotated[primitives.uint256, Name("minMarketTokens")]
- updated_at_block: Annotated[primitives.uint256, Name("updatedAtBlock")]
- updated_at_time: Annotated[primitives.uint256, Name("updatedAtTime")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
-
-
-class DepositAddresses(Struct):
- account: primitives.address
- receiver: primitives.address
- callback_contract: Annotated[primitives.address, Name("callbackContract")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- market: primitives.address
- initial_long_token: Annotated[primitives.address, Name("initialLongToken")]
- initial_short_token: Annotated[primitives.address, Name("initialShortToken")]
- long_token_swap_path: Annotated[list[primitives.address], Name("longTokenSwapPath")]
- short_token_swap_path: Annotated[list[primitives.address], Name("shortTokenSwapPath")]
-
-
-class PositionFlags(Struct):
- is_long: Annotated[bool, Name("isLong")]
-
-
-class PositionNumbers(Struct):
- size_in_usd: Annotated[primitives.uint256, Name("sizeInUsd")]
- size_in_tokens: Annotated[primitives.uint256, Name("sizeInTokens")]
- collateral_amount: Annotated[primitives.uint256, Name("collateralAmount")]
- borrowing_factor: Annotated[primitives.uint256, Name("borrowingFactor")]
- funding_fee_amount_per_size: Annotated[primitives.uint256, Name("fundingFeeAmountPerSize")]
- long_token_claimable_funding_amount_per_size: Annotated[primitives.uint256, Name("longTokenClaimableFundingAmountPerSize")]
- short_token_claimable_funding_amount_per_size: Annotated[primitives.uint256, Name("shortTokenClaimableFundingAmountPerSize")]
- increased_at_block: Annotated[primitives.uint256, Name("increasedAtBlock")]
- decreased_at_block: Annotated[primitives.uint256, Name("decreasedAtBlock")]
- increased_at_time: Annotated[primitives.uint256, Name("increasedAtTime")]
- decreased_at_time: Annotated[primitives.uint256, Name("decreasedAtTime")]
-
-
-class PositionAddresses(Struct):
- account: Annotated[primitives.address, BeforeValidator(to_checksum)]
- market: Annotated[primitives.address, BeforeValidator(to_checksum)]
- collateral_token: Annotated[primitives.address, Name("collateralToken"), BeforeValidator(to_checksum)]
-
-
-class PositionPricingUtilsPositionReferralFees(Struct):
- referral_code: primitives.bytes32
- affiliate: primitives.address
- trader: primitives.address
- total_rebate_factor: Annotated[primitives.uint256, Name("totalRebateFactor")]
- affiliate_reward_factor: Annotated[primitives.uint256, Name("affiliateRewardFactor")]
- adjusted_affiliate_reward_factor: Annotated[primitives.uint256, Name("adjustedAffiliateRewardFactor")]
- trader_discount_factor: Annotated[primitives.uint256, Name("traderDiscountFactor")]
- total_rebate_amount: Annotated[primitives.uint256, Name("totalRebateAmount")]
- trader_discount_amount: Annotated[primitives.uint256, Name("traderDiscountAmount")]
- affiliate_reward_amount: Annotated[primitives.uint256, Name("affiliateRewardAmount")]
-
-
-class PositionPricingUtilsPositionFundingFees(Struct):
- funding_fee_amount: Annotated[primitives.uint256, Name("fundingFeeAmount")]
- claimable_long_token_amount: Annotated[primitives.uint256, Name("claimableLongTokenAmount")]
- claimable_short_token_amount: Annotated[primitives.uint256, Name("claimableShortTokenAmount")]
- latest_funding_fee_amount_per_size: Annotated[primitives.uint256, Name("latestFundingFeeAmountPerSize")]
- latest_long_token_claimable_funding_amount_per_size: Annotated[primitives.uint256, Name("latestLongTokenClaimableFundingAmountPerSize")]
- latest_short_token_claimable_funding_amount_per_size: Annotated[primitives.uint256, Name("latestShortTokenClaimableFundingAmountPerSize")]
-
-
-class PositionPricingUtilsPositionBorrowingFees(Struct):
- borrowing_fee_usd: Annotated[primitives.uint256, Name("borrowingFeeUsd")]
- borrowing_fee_amount: Annotated[primitives.uint256, Name("borrowingFeeAmount")]
- borrowing_fee_receiver_factor: Annotated[primitives.uint256, Name("borrowingFeeReceiverFactor")]
- borrowing_fee_amount_for_fee_receiver: Annotated[primitives.uint256, Name("borrowingFeeAmountForFeeReceiver")]
-
-
-class PositionPricingUtilsPositionUiFees(Struct):
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- ui_fee_receiver_factor: Annotated[primitives.uint256, Name("uiFeeReceiverFactor")]
- ui_fee_amount: Annotated[primitives.uint256, Name("uiFeeAmount")]
-
-
-class PriceProps(Struct):
- min: primitives.uint256
- max: primitives.uint256
-
-
-class PositionPricingUtilsPositionFees(Struct):
- referral: PositionPricingUtilsPositionReferralFees
- funding: PositionPricingUtilsPositionFundingFees
- borrowing: PositionPricingUtilsPositionBorrowingFees
- ui: PositionPricingUtilsPositionUiFees
- collateral_token_price: Annotated[PriceProps, Name("collateralTokenPrice")]
- position_fee_factor: Annotated[primitives.uint256, Name("positionFeeFactor")]
- protocol_fee_amount: Annotated[primitives.uint256, Name("protocolFeeAmount")]
- position_fee_receiver_factor: Annotated[primitives.uint256, Name("positionFeeReceiverFactor")]
- fee_receiver_amount: Annotated[primitives.uint256, Name("feeReceiverAmount")]
- fee_amount_for_pool: Annotated[primitives.uint256, Name("feeAmountForPool")]
- position_fee_amount_for_pool: Annotated[primitives.uint256, Name("positionFeeAmountForPool")]
- position_fee_amount: Annotated[primitives.uint256, Name("positionFeeAmount")]
- total_cost_amount_excluding_funding: Annotated[primitives.uint256, Name("totalCostAmountExcludingFunding")]
- total_cost_amount: Annotated[primitives.uint256, Name("totalCostAmount")]
-
-
-class OrderFlags(Struct):
- is_long: Annotated[bool, Name("isLong")]
- should_unwrap_native_token: Annotated[bool, Name("shouldUnwrapNativeToken")]
- is_frozen: Annotated[bool, Name("isFrozen")]
- auto_cancel: Annotated[bool, Name("autoCancel")]
-
-
-class OrderNumbers(Struct):
- order_type: Annotated[primitives.uint8, Name("orderType")]
- decrease_position_swap_type: Annotated[primitives.uint8, Name("decreasePositionSwapType")]
- size_delta_usd: Annotated[primitives.uint256, Name("sizeDeltaUsd")]
- initial_collateral_delta_amount: Annotated[primitives.uint256, Name("initialCollateralDeltaAmount")]
- trigger_price: Annotated[primitives.uint256, Name("triggerPrice")]
- acceptable_price: Annotated[primitives.uint256, Name("acceptablePrice")]
- execution_fee: Annotated[primitives.uint256, Name("executionFee")]
- callback_gas_limit: Annotated[primitives.uint256, Name("callbackGasLimit")]
- min_output_amount: Annotated[primitives.uint256, Name("minOutputAmount")]
- updated_at_block: Annotated[primitives.uint256, Name("updatedAtBlock")]
- updated_at_time: Annotated[primitives.uint256, Name("updatedAtTime")]
-
-
-class OrderAddresses(Struct):
- account: primitives.address
- receiver: primitives.address
- cancellation_receiver: Annotated[primitives.address, Name("cancellationReceiver")]
- callback_contract: Annotated[primitives.address, Name("callbackContract")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- market: primitives.address
- initial_collateral_token: Annotated[primitives.address, Name("initialCollateralToken")]
- swap_path: Annotated[list[primitives.address], Name("swapPath")]
-
-
-class WithdrawalProps(Struct):
- addresses: WithdrawalAddresses
- numbers: WithdrawalNumbers
- flags: WithdrawalFlags
-
-
-class SwapPricingUtilsSwapFees(Struct):
- fee_receiver_amount: Annotated[primitives.uint256, Name("feeReceiverAmount")]
- fee_amount_for_pool: Annotated[primitives.uint256, Name("feeAmountForPool")]
- amount_after_fees: Annotated[primitives.uint256, Name("amountAfterFees")]
- ui_fee_receiver: Annotated[primitives.address, Name("uiFeeReceiver")]
- ui_fee_receiver_factor: Annotated[primitives.uint256, Name("uiFeeReceiverFactor")]
- ui_fee_amount: Annotated[primitives.uint256, Name("uiFeeAmount")]
-
-
-class ShiftProps(Struct):
- addresses: ShiftAddresses
- numbers: ShiftNumbers
-
-
-class MarketPoolValueInfoProps(Struct):
- pool_value: Annotated[primitives.int256, Name("poolValue")]
- long_pnl: Annotated[primitives.int256, Name("longPnl")]
- short_pnl: Annotated[primitives.int256, Name("shortPnl")]
- net_pnl: Annotated[primitives.int256, Name("netPnl")]
- long_token_amount: Annotated[primitives.uint256, Name("longTokenAmount")]
- short_token_amount: Annotated[primitives.uint256, Name("shortTokenAmount")]
- long_token_usd: Annotated[primitives.uint256, Name("longTokenUsd")]
- short_token_usd: Annotated[primitives.uint256, Name("shortTokenUsd")]
- total_borrowing_fees: Annotated[primitives.uint256, Name("totalBorrowingFees")]
- borrowing_fee_pool_factor: Annotated[primitives.uint256, Name("borrowingFeePoolFactor")]
- impact_pool_amount: Annotated[primitives.uint256, Name("impactPoolAmount")]
-
-
-class MarketProps(Struct):
- market_token: Annotated[primitives.address, Name("marketToken"), BeforeValidator(to_checksum)]
- index_token: Annotated[primitives.address, Name("indexToken"), BeforeValidator(to_checksum)]
- long_token: Annotated[primitives.address, Name("longToken"), BeforeValidator(to_checksum)]
- short_token: Annotated[primitives.address, Name("shortToken"), BeforeValidator(to_checksum)]
-
-
-class ReaderUtilsMarketInfo(Struct):
- market: MarketProps
- borrowing_factor_per_second_for_longs: Annotated[primitives.uint256, Name("borrowingFactorPerSecondForLongs")]
- borrowing_factor_per_second_for_shorts: Annotated[primitives.uint256, Name("borrowingFactorPerSecondForShorts")]
- base_funding: Annotated[ReaderUtilsBaseFundingValues, Name("baseFunding")]
- next_funding: Annotated[MarketUtilsGetNextFundingAmountPerSizeResult, Name("nextFunding")]
- virtual_inventory: Annotated[ReaderUtilsVirtualInventory, Name("virtualInventory")]
- is_disabled: Annotated[bool, Name("isDisabled")]
-
-
-class ReaderPricingUtilsExecutionPriceResult(Struct):
- price_impact_usd: Annotated[primitives.int256, Name("priceImpactUsd")]
- price_impact_diff_usd: Annotated[primitives.uint256, Name("priceImpactDiffUsd")]
- execution_price: Annotated[primitives.uint256, Name("executionPrice")]
-
-
-class DepositProps(Struct):
- addresses: DepositAddresses
- numbers: DepositNumbers
- flags: DepositFlags
-
-
-class MarketUtilsMarketPrices(Struct):
- index_token_price: Annotated[PriceProps, Name("indexTokenPrice")]
- long_token_price: Annotated[PriceProps, Name("longTokenPrice")]
- short_token_price: Annotated[PriceProps, Name("shortTokenPrice")]
-
-
-class PositionProps(Struct):
- addresses: PositionAddresses
- numbers: PositionNumbers
- flags: PositionFlags
-
-
-class ReaderUtilsPositionInfo(Struct):
- position: PositionProps
- fees: PositionPricingUtilsPositionFees
- execution_price_result: Annotated[
- ReaderPricingUtilsExecutionPriceResult,
- Name("executionPriceResult"),
- ]
- base_pnl_usd: Annotated[primitives.int256, Name("basePnlUsd")]
- uncapped_base_pnl_usd: Annotated[primitives.int256, Name("uncappedBasePnlUsd")]
- pnl_after_price_impact_usd: Annotated[primitives.int256, Name("pnlAfterPriceImpactUsd")]
-
-
-class OrderProps(Struct):
- addresses: OrderAddresses
- numbers: OrderNumbers
- flags: OrderFlags
diff --git a/src/talos/contracts/gmx/getters/__init__.py b/src/talos/contracts/gmx/getters/__init__.py
deleted file mode 100644
index dbe4f2c3..00000000
--- a/src/talos/contracts/gmx/getters/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .claimable_fees import GetClaimableFees
-from .funding_apr import GetFundingFee
-from .get import GetData
-from .open_interest import OpenInterest
-from .prices import OraclePrices
-
-__all__ = ['GetClaimableFees', 'GetFundingFee', 'GetData', 'OpenInterest', 'OraclePrices']
diff --git a/src/talos/contracts/gmx/getters/borrow_apr.py b/src/talos/contracts/gmx/getters/borrow_apr.py
deleted file mode 100644
index 1954f80b..00000000
--- a/src/talos/contracts/gmx/getters/borrow_apr.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import asyncio
-import logging
-
-from ..contracts.synthetics_reader.types import ReaderUtilsMarketInfo
-from .get import GetData
-
-
-class GetBorrowAPR(GetData):
- async def _get_data_processing(self) -> dict[str, dict[str, str | float] | str]:
- """
- Generate the dictionary of borrow APR data
-
- Returns
- -------
- funding_apr : dict
- dictionary of borrow data.
-
- """
- output_list = []
- mapper = []
- for market_key in self.markets._info:
- index_token_address = self.markets.get_index_token_address(market_key)
-
- self._get_token_addresses(market_key)
- output = self._get_oracle_prices(
- market_key,
- index_token_address,
- )
-
- output_list.append(output)
- mapper.append(self.markets.get_market_symbol(market_key))
-
- threaded_output: list[ReaderUtilsMarketInfo] = await asyncio.gather(*output_list)
-
- for key, market_info in zip(mapper, threaded_output):
- self._output["long"][key] = (market_info.borrowing_factor_per_second_for_longs / 10**28) * 3600 # type: ignore
- self._output["short"][key] = (market_info.borrowing_factor_per_second_for_shorts / 10**28) * 3600 # type: ignore
-
- logging.info(
- ("{}\nLong Borrow Hourly Rate: -{:.5f}%\nShort Borrow Hourly Rate: -{:.5f}%\n").format(
- key,
- self._output["long"][key], # type: ignore
- self._output["short"][key], # type: ignore
- )
- )
-
- self._output["parameter"] = "borrow_apr"
-
- return self._output
diff --git a/src/talos/contracts/gmx/getters/claimable_fees.py b/src/talos/contracts/gmx/getters/claimable_fees.py
deleted file mode 100644
index 3f5052c1..00000000
--- a/src/talos/contracts/gmx/getters/claimable_fees.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import asyncio
-import logging
-
-from eth_typing import HexAddress, HexStr
-from eth_rpc import PrivateKeyWallet
-
-from ..contracts import datastore, exchange_router
-from .get import GetData
-from .prices import OraclePrices
-from ..utils import median, numerize
-from ..utils.keys import claimable_fee_amount_key, claimable_funding_amount_key
-
-
-class GetClaimableFees(GetData):
- """
- Get total fees dictionary
-
- > await GetClaimableFees().get_data()
- """
-
- async def claim_market_fees(self, market: HexAddress, token: HexAddress, wallet: PrivateKeyWallet) -> HexStr | None:
- claimable_fee = await self.get_claimable_funding_fees(market, token, wallet.address)
- if claimable_fee > 0:
- tx_hash = await exchange_router.claim_funding_fees(market, token, wallet.address).execute(wallet)
- return tx_hash
- return None
-
- async def get_claimable_funding_fees(self, market: HexAddress, token: HexAddress, address: HexAddress):
- claimable_fee = claimable_funding_amount_key(market, token, address)
- claimable_fee = await datastore.get_uint(claimable_fee).get()
- return claimable_fee
-
- async def _get_data_processing(self):
- """
- Get total fees dictionary
-
- Returns
- -------
- funding_apr : dict
- dictionary of total fees for week so far.
-
- """
- total_fees = 0
- long_output_list = []
- short_output_list = []
- long_precision_list = []
- long_token_price_list = []
- mapper = []
-
- for market_key in self.markets.info:
- self._filter_swap_markets()
- self._get_token_addresses(market_key)
- market_symbol = self.markets.get_market_symbol(market_key)
- long_decimal_factor = self.markets.get_decimal_factor(
- market_key=market_key,
- long=True,
- short=False
- )
- long_precision = 10**(long_decimal_factor - 1)
- oracle_precision = 10**(30 - long_decimal_factor)
-
- long_output = self._get_claimable_fee_amount(
- market_key,
- self._long_token_address
- )
-
- prices = await OraclePrices().get_recent_prices()
- long_token_price = median(
- [
- float(
- prices[self._long_token_address].max_price_full
- ) / oracle_precision,
- float(
- prices[self._long_token_address].min_price_full
- ) / oracle_precision
- ]
- )
-
- long_token_price_list.append(long_token_price)
- long_precision_list.append(long_precision)
-
- short_output = self._get_claimable_fee_amount(
- market_key,
- self._short_token_address
- )
-
- # add the uncalled web3 objects to list
- long_output_list = long_output_list + [long_output]
- short_output_list = short_output_list + [short_output]
-
- # add the market symbol to a list to use to map to dictionary later
- mapper.append(market_symbol)
-
- # feed the uncalled web3 objects into threading function
- long_threaded_output = await asyncio.gather(*long_output_list)
- short_threaded_output = await asyncio.gather(*short_output_list)
-
- for (
- long_claimable_fees,
- short_claimable_fees,
- long_precision,
- long_token_price,
- token_symbol
- ) in zip(
- long_threaded_output,
- short_threaded_output,
- long_precision_list,
- long_token_price_list,
- mapper
- ):
- # convert raw outputs into USD value
- long_claimable_usd = (
- long_claimable_fees / long_precision
- ) * long_token_price
-
- # TODO - currently all short fees are collected in USDC which is
- # 6 decimals
- short_claimable_usd = short_claimable_fees / (10 ** 6)
-
- if "2" in token_symbol:
- short_claimable_usd = 0
-
- logging.info(f"Token: {token_symbol}")
-
- logging.info(
- f"""Long Claimable Fees:
- ${numerize(long_claimable_usd)}"""
- )
-
- logging.info(
- f"""Short Claimable Fees:
- ${numerize(short_claimable_usd)}"""
- )
-
- total_fees += long_claimable_usd + short_claimable_usd
-
- return {'total_fees': total_fees,
- "parameter": "total_fees"}
-
- async def _get_claimable_fee_amount(
- self, market_address: str, token_address: str
- ):
- """
- For a given market and long/short side of the pool get the raw output
- for pending fees
-
- Parameters
- ----------
- market_address : str
- addess of the GMX market.
- token_address : str
- address of either long or short collateral token.
-
- Returns
- -------
- claimable_fee : web3 datastore obj
- uncalled obj of the datastore contract.
-
- """
-
- # create hashed key to query the datastore
- claimable_fees_amount_hash_data = claimable_fee_amount_key(
- market_address,
- token_address
- )
-
- claimable_fee = await datastore.get_uint(
- claimable_fees_amount_hash_data
- ).get()
-
- return claimable_fee
diff --git a/src/talos/contracts/gmx/getters/funding_apr.py b/src/talos/contracts/gmx/getters/funding_apr.py
deleted file mode 100644
index ea577891..00000000
--- a/src/talos/contracts/gmx/getters/funding_apr.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import asyncio
-import json
-import os
-from typing import Awaitable
-
-from pydantic import Field
-
-from ..contracts.synthetics_reader.types import ReaderUtilsMarketInfo
-from ..utils.funding import get_funding_factor_per_period
-from .get import GetData
-from .open_interest import OpenInterest
-
-base_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "data_store")
-
-
-class GetFundingFee(GetData):
- open_interest: OpenInterest = Field(default_factory=OpenInterest)
-
- async def _get_data_processing(self) -> dict[str, dict[str, str | float] | str]:
- """
- Generate the dictionary of funding APR data
-
- Returns
- -------
- funding_apr : dict
- dictionary of funding data.
-
- """
-
- # If passing true will use local instance of open interest data
- if self.use_local_datastore:
- open_interest = json.load(open(os.path.join(base_dir, "data_store", "open_interest.json")))
- else:
- open_interest = await self.open_interest.get_data(to_json=False)
-
- print("\nGMX v2 Funding Rates (% per hour)")
-
- # define empty lists to pass to zip iterater later on
- mapper = []
- output_list: list[Awaitable[ReaderUtilsMarketInfo]] = []
- long_interest_usd_list: list[int] = []
- short_interest_usd_list: list[int] = []
-
- # loop markets
- for market_key in self.markets._info:
- symbol = self.markets.get_market_symbol(market_key)
- index_token_address = self.markets.get_index_token_address(market_key)
- self._get_token_addresses(market_key)
-
- output: Awaitable[ReaderUtilsMarketInfo] = self._get_oracle_prices(
- market_key,
- index_token_address,
- )
-
- mapper.append(symbol)
- output_list.append(output)
- long_interest_usd_list = long_interest_usd_list + [open_interest["long"][symbol] * 10**30]
- short_interest_usd_list = short_interest_usd_list + [open_interest["short"][symbol] * 10**30]
-
- # Multithreaded call on contract
- threaded_output: list[ReaderUtilsMarketInfo] = await asyncio.gather(*output_list)
-
- for market_info, long_interest_usd, short_interest_usd, symbol in zip(
- threaded_output, long_interest_usd_list, short_interest_usd_list, mapper
- ):
- print("\n{}".format(symbol))
-
- long_funding_fee = get_funding_factor_per_period(
- market_info, True, 3600, long_interest_usd, short_interest_usd
- )
- print("Long funding hrly rate {:.4f}%".format(long_funding_fee))
-
- short_funding_fee = get_funding_factor_per_period(
- market_info, False, 3600, long_interest_usd, short_interest_usd
- )
- print("Short funding hrly rate {:.4f}%".format(short_funding_fee))
-
- self.output["long"][symbol] = long_funding_fee # type: ignore
- self.output["short"][symbol] = short_funding_fee # type: ignore
-
- self.output["parameter"] = "funding_apr"
-
- return self.output
diff --git a/src/talos/contracts/gmx/getters/funding_fees.py b/src/talos/contracts/gmx/getters/funding_fees.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/contracts/gmx/getters/get.py b/src/talos/contracts/gmx/getters/get.py
deleted file mode 100644
index fa925378..00000000
--- a/src/talos/contracts/gmx/getters/get.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import logging
-from typing import Any, Awaitable, Literal, Optional, overload
-
-from eth_rpc.types.primitives import int256, uint256
-from eth_typing import ChecksumAddress
-from pydantic import BaseModel, Field, PrivateAttr
-
-from ..constants import DATASTORE_ADDRESS
-from ..contracts import SyntheticsReader, synthetics_reader
-from ..contracts.synthetics_reader import (
- GetMarketParams,
- GetOpenInterestParams,
- GetPnlParams,
- MarketProps,
- MarketUtilsMarketPrices,
- PriceProps,
-)
-from ..contracts.synthetics_reader.types import ReaderUtilsMarketInfo
-from ..utils.datastore import make_timestamped_dataframe, save_csv_to_datastore, save_json_file_to_datastore
-from .markets import Markets
-from .prices import OraclePrices
-
-
-class GetData(BaseModel):
- use_local_datastore: bool = Field(default=False)
- filter_swap_markets: bool = Field(default=True)
- markets: Markets = Field(default_factory=Markets)
- reader_contract: SyntheticsReader = Field(default_factory=lambda: synthetics_reader)
-
- _long_token_address: Optional[ChecksumAddress] = PrivateAttr(default=None)
- _short_token_address: Optional[ChecksumAddress] = PrivateAttr(default=None)
-
- _output: dict[str, dict[str, str | float] | str] = PrivateAttr(default_factory=lambda: {"long": {}, "short": {}})
-
- async def load(self) -> None:
- await self.markets.load_info()
-
- async def get_data(self, to_json: bool = False, to_csv: bool = False) -> dict[str, Any]:
- if self.filter_swap_markets:
- self._filter_swap_markets()
-
- data = await self._get_data_processing()
-
- if to_json:
- parameter = data["parameter"]
- save_json_file_to_datastore("{}_data.json".format(parameter), data)
-
- if to_csv:
- try:
- parameter = data["parameter"]
- dataframe = make_timestamped_dataframe(data["long"])
- save_csv_to_datastore("long_{}_data.csv".format(parameter), dataframe)
- dataframe = make_timestamped_dataframe(data["short"])
- save_csv_to_datastore("short_{}_data.csv".format(parameter), dataframe)
- except KeyError:
- dataframe = make_timestamped_dataframe(data)
- save_csv_to_datastore("{}_data.csv".format(parameter), dataframe)
-
- except Exception as e:
- logging.info(e)
-
- return data
-
- async def _get_data_processing(self) -> dict[str, Any]:
- raise NotImplementedError()
-
- def _get_token_addresses(self, market_key: ChecksumAddress) -> None:
- self._long_token_address = self.markets.get_long_token_address(market_key)
- self._short_token_address = self.markets.get_short_token_address(market_key)
- logging.info(
- "Long Token Address: {}\nShort Token Address: {}".format(
- self._long_token_address, self._short_token_address
- )
- )
-
- def _filter_swap_markets(self) -> None:
- # TODO: Move to markets MAYBE
- keys_to_remove = []
- for market_key in self.markets._info:
- market_symbol = self.markets.get_market_symbol(market_key)
- if "SWAP" in market_symbol:
- # Remove swap markets from dict
- keys_to_remove.append(market_key)
-
- [self.markets._info.pop(k) for k in keys_to_remove]
-
- def _get_pnl(
- self,
- market: MarketProps,
- prices_props: PriceProps,
- is_long: bool,
- maximize: bool = False,
- ) -> tuple[Awaitable[int256], Awaitable[int256]]:
- """returns two coroutines"""
- open_interest_pnl = self.reader_contract.get_open_interest_with_pnl(
- GetOpenInterestParams(
- data_store=DATASTORE_ADDRESS,
- market=market,
- index_token_price=prices_props,
- is_long=is_long,
- maximize=maximize,
- )
- ).get()
-
- pnl = self.reader_contract.get_pnl(
- GetPnlParams(
- data_store=DATASTORE_ADDRESS,
- market=market,
- index_token_price=prices_props,
- is_long=is_long,
- maximize=maximize,
- )
- ).get()
-
- return open_interest_pnl, pnl
-
- @overload
- async def _get_oracle_prices(
- self,
- market_key: ChecksumAddress,
- index_token_address: ChecksumAddress,
- return_tuple: Literal[True],
- ) -> MarketUtilsMarketPrices: ...
-
- @overload
- async def _get_oracle_prices(
- self,
- market_key: ChecksumAddress,
- index_token_address: ChecksumAddress,
- ) -> ReaderUtilsMarketInfo: ...
-
- async def _get_oracle_prices(
- self,
- market_key: ChecksumAddress,
- index_token_address: ChecksumAddress,
- return_tuple: bool = False,
- ) -> ReaderUtilsMarketInfo | MarketUtilsMarketPrices:
- """
- For a given market get the marketInfo from the reader contract
- """
-
- oracle_prices_dict = await OraclePrices.get_recent_prices()
-
- assert self._long_token_address is not None
- assert self._short_token_address is not None
-
- try:
- prices = MarketUtilsMarketPrices(
- index_token_price=PriceProps(
- min=uint256(int((oracle_prices_dict[index_token_address].min_price_full))),
- max=uint256(int((oracle_prices_dict[index_token_address].max_price_full))),
- ),
- long_token_price=PriceProps(
- min=uint256(int(oracle_prices_dict[self._long_token_address].min_price_full)),
- max=uint256(int(oracle_prices_dict[self._long_token_address].max_price_full)),
- ),
- short_token_price=PriceProps(
- min=uint256(int(oracle_prices_dict[self._short_token_address].min_price_full)),
- max=uint256(int(oracle_prices_dict[self._short_token_address].max_price_full)),
- ),
- )
- # TODO - this needs to be here until GMX add stables to signed price
- except KeyError:
- prices = MarketUtilsMarketPrices(
- index_token_price=PriceProps(
- min=uint256(int(oracle_prices_dict[index_token_address].min_price_full)),
- max=uint256(int(oracle_prices_dict[index_token_address].max_price_full)),
- ),
- long_token_price=PriceProps(
- min=uint256(int(oracle_prices_dict[self._long_token_address].min_price_full)),
- max=uint256(int(oracle_prices_dict[self._long_token_address].max_price_full)),
- ),
- short_token_price=PriceProps(
- min=uint256(int(1000000000000000000000000)),
- max=uint256(int(1000000000000000000000000)),
- ),
- )
-
- if return_tuple:
- return prices
-
- response: ReaderUtilsMarketInfo = await self.reader_contract.get_market_info(
- GetMarketParams(
- data_store=DATASTORE_ADDRESS,
- prices=prices,
- market_key=market_key,
- )
- ).get()
- return response
-
- @property
- def output(self) -> dict[str, dict[str, str | float] | str]:
- return self._output
diff --git a/src/talos/contracts/gmx/getters/markets.py b/src/talos/contracts/gmx/getters/markets.py
deleted file mode 100644
index 81f64d76..00000000
--- a/src/talos/contracts/gmx/getters/markets.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import asyncio
-import logging
-
-from eth_rpc.types.primitives import uint256
-from eth_rpc.utils import to_checksum
-from eth_typing import ChecksumAddress
-from pydantic import BaseModel, PrivateAttr
-
-from ..constants import DATASTORE_ADDRESS
-from ..contracts import synthetics_reader
-from ..contracts.synthetics_reader import GetMarketsParams, MarketProps
-from ..types.markets import Market, MarketMetadata
-from ..utils.tokens import get_tokens_address_dict
-from .prices import OraclePrices
-
-
-class Markets(BaseModel):
- _info: dict[ChecksumAddress, Market] = PrivateAttr(default_factory=dict)
-
- async def load_info(self) -> dict[ChecksumAddress, Market]:
- if not self._info:
- self._info = await self._process_markets()
- return self._info
-
- def get_index_token_address(self, market_key: ChecksumAddress) -> ChecksumAddress:
- return self._info[market_key].index_token_address
-
- def get_long_token_address(self, market_key: ChecksumAddress) -> ChecksumAddress:
- return self._info[market_key].long_token_address
-
- def get_short_token_address(self, market_key: ChecksumAddress) -> ChecksumAddress:
- return self._info[market_key].short_token_address
-
- def get_market_symbol(self, market_key: ChecksumAddress) -> str:
- return self._info[market_key].market_symbol
-
- def get_decimal_factor(self, market_key: ChecksumAddress, long: bool = False, short: bool = False) -> int:
- if long:
- return self._info[market_key].long_token_metadata.decimals
- elif short:
- return self._info[market_key].short_token_metadata.decimals
- else:
- return self._info[market_key].market_metadata.decimals
-
- def is_synthetic(self, market_key: ChecksumAddress) -> bool:
- return self._info[market_key].market_metadata.synthetic
-
- async def get_available_markets(self) -> dict[ChecksumAddress, Market]:
- """
- Get the available markets on a given chain
-
- Returns
- -------
- Markets: dict
- dictionary of the available markets.
-
- """
- logging.info("Getting Available Markets..")
- return await self._process_markets()
-
- async def _get_available_markets_raw(self) -> list[MarketProps]:
- """
- Get the available markets from the reader contract
-
- Returns
- -------
- Markets: tuple
- tuple of raw output from the reader contract.
-
- """
-
- response = await synthetics_reader.get_markets(
- GetMarketsParams(
- data_store=DATASTORE_ADDRESS,
- start_index=uint256(0),
- end_index=uint256(50),
- )
- ).get()
-
- return response # type: ignore
-
- async def _process_markets(self) -> dict[ChecksumAddress, Market]:
- """
- Call and process the raw market data
-
- Returns
- -------
- decoded_markets : dict
- dictionary decoded market data.
-
- """
- decoded_markets = {}
-
- token_address_dict, raw_markets = await asyncio.gather(
- get_tokens_address_dict(), self._get_available_markets_raw()
- )
- checks = await asyncio.gather(
- *[
- self._check_if_index_token_in_signed_prices_api(to_checksum(raw_market.index_token))
- for raw_market in raw_markets
- ]
- )
-
- for raw_market, check in zip(raw_markets, checks):
- if not check:
- continue
- try:
- market_symbol = token_address_dict[to_checksum(raw_market.index_token)].symbol
-
- if raw_market.long_token == raw_market.short_token:
- market_symbol = f"{market_symbol}2"
- decoded_markets[to_checksum(raw_market.market_token)] = Market(
- address=to_checksum(raw_market.market_token),
- market_symbol=market_symbol,
- index_token_address=to_checksum(raw_market.index_token),
- market_metadata=token_address_dict[to_checksum(raw_market.index_token)],
- long_token_metadata=token_address_dict[to_checksum(raw_market.long_token)],
- long_token_address=to_checksum(raw_market.long_token),
- short_token_metadata=token_address_dict[to_checksum(raw_market.short_token)],
- short_token_address=to_checksum(raw_market.short_token),
- )
- if raw_market.market_token == "0x0Cf1fb4d1FF67A3D8Ca92c9d6643F8F9be8e03E5":
- decoded_markets[to_checksum(raw_market.market_token)].market_symbol = "wstETH"
- decoded_markets[to_checksum(raw_market.market_token)].index_token_address = to_checksum(
- "0x5979D7b546E38E414F7E9822514be443A4800529"
- )
-
- # If KeyError it is because there is no market symbol and it is a
- # swap market
- except KeyError:
- decoded_markets[to_checksum(raw_market.market_token)] = Market(
- address=to_checksum(raw_market.market_token),
- market_symbol="SWAP {}-{}".format(
- token_address_dict[to_checksum(raw_market.long_token)].symbol,
- token_address_dict[to_checksum(raw_market.short_token)].symbol,
- ),
- index_token_address=to_checksum(raw_market.index_token),
- market_metadata=MarketMetadata(
- symbol="SWAP {}-{}".format(
- token_address_dict[to_checksum(raw_market.long_token)].symbol,
- token_address_dict[to_checksum(raw_market.short_token)].symbol,
- )
- ),
- long_token_metadata=token_address_dict[to_checksum(raw_market.long_token)],
- long_token_address=to_checksum(raw_market.long_token),
- short_token_metadata=token_address_dict[to_checksum(raw_market.short_token)],
- short_token_address=to_checksum(raw_market.short_token),
- )
-
- return decoded_markets
-
- async def _check_if_index_token_in_signed_prices_api(self, index_token_address: ChecksumAddress) -> bool:
- try:
- prices = await OraclePrices.get_recent_prices()
-
- if index_token_address == "0x0000000000000000000000000000000000000000":
- return True
- prices[to_checksum(index_token_address)]
- return True
- except KeyError:
- print("{} market not live on GMX yet..".format(index_token_address))
- return False
-
- @property
- def info(self) -> dict[ChecksumAddress, Market]:
- return self._info
diff --git a/src/talos/contracts/gmx/getters/open_interest.py b/src/talos/contracts/gmx/getters/open_interest.py
deleted file mode 100644
index 62dfeb11..00000000
--- a/src/talos/contracts/gmx/getters/open_interest.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import asyncio
-import logging
-from typing import Awaitable
-
-from eth_rpc.types.primitives import address, int256, uint256
-
-from ..contracts.synthetics_reader import MarketProps, PriceProps
-from .get import GetData
-from .prices import OraclePrices
-from ..utils import numerize
-
-
-class OpenInterest(GetData):
- async def _get_data_processing(self) -> dict[str, dict[str, str | float] | str]:
- """
- Generate the dictionary of open interest data
-
- Returns
- -------
- funding_apr : dict
- dictionary of open interest data.
-
- """
- oracle_prices_dict = await OraclePrices.get_recent_prices()
- print("GMX v2 Open Interest\n")
-
- long_oi_output_list: list[Awaitable[int256]] = []
- short_oi_output_list: list[Awaitable[int256]] = []
- long_pnl_output_list: list[Awaitable[int256]] = []
- short_pnl_output_list: list[Awaitable[int256]] = []
- mapper = []
- long_precision_list: list[int] = []
-
- for market_key in self.markets.info:
- self._filter_swap_markets()
- self._get_token_addresses(market_key)
-
- index_token_address = self.markets.get_index_token_address(market_key)
-
- assert self._short_token_address is not None
- assert self._long_token_address is not None
-
- market = MarketProps(
- market_token=address(market_key),
- index_token=address(index_token_address),
- long_token=address(self._long_token_address),
- short_token=address(self._short_token_address),
- )
-
- min_price = int(oracle_prices_dict[index_token_address].min_price_full)
- max_price = int(oracle_prices_dict[index_token_address].max_price_full)
- price_props = PriceProps(min=uint256(min_price), max=uint256(max_price))
-
- # If the market is a synthetic one we need to use the decimals
- # from the index token
- try:
- if self.markets.is_synthetic(market_key):
- decimal_factor = self.markets.get_decimal_factor(
- market_key,
- )
- else:
- decimal_factor = self.markets.get_decimal_factor(market_key, long=True)
- except KeyError:
- decimal_factor = self.markets.get_decimal_factor(market_key, long=True)
-
- oracle_factor = 30 - decimal_factor
- precision = 10 ** (decimal_factor + oracle_factor)
- long_precision_list = long_precision_list + [precision]
-
- long_oi_with_pnl, long_pnl = self._get_pnl(market, price_props, is_long=True)
-
- short_oi_with_pnl, short_pnl = self._get_pnl(market, price_props, is_long=False)
-
- long_oi_output_list.append(long_oi_with_pnl)
- short_oi_output_list.append(short_oi_with_pnl)
- long_pnl_output_list.append(long_pnl)
- short_pnl_output_list.append(short_pnl)
- mapper.append(self.markets.get_market_symbol(market_key))
-
- # TODO - currently just waiting x amount of time to not hit rate limit,
- # but needs a retry
- long_oi_threaded_output = await asyncio.gather(*long_oi_output_list)
- await asyncio.sleep(2)
- short_oi_threaded_output = await asyncio.gather(*short_oi_output_list)
- await asyncio.sleep(2)
- long_pnl_threaded_output = await asyncio.gather(*long_pnl_output_list)
- await asyncio.sleep(2)
- short_pnl_threaded_output = await asyncio.gather(*short_pnl_output_list)
-
- for market_symbol, long_oi, short_oi, long_pnl, short_pnl, long_precision in zip(
- mapper,
- long_oi_threaded_output,
- short_oi_threaded_output,
- long_pnl_threaded_output,
- short_pnl_threaded_output,
- long_precision_list,
- ):
- precision = 10**30
- long_value = (long_oi - long_pnl) / long_precision
- short_value = (short_oi - short_pnl) / precision
-
- logging.info(f"{market_symbol} Long: ${numerize.numerize(long_value)}")
- logging.info(f"{market_symbol} Short: ${numerize.numerize(short_value)}")
-
- self.output["long"][market_symbol] = long_value # type: ignore
- self.output["short"][market_symbol] = short_value # type: ignore
- self.output["parameter"] = "open_interest"
-
- return self.output
-
-
-if __name__ == "__main__":
- data = OpenInterest().get_data(to_csv=False)
- print(data)
diff --git a/src/talos/contracts/gmx/getters/open_positions.py b/src/talos/contracts/gmx/getters/open_positions.py
deleted file mode 100644
index 0c3d7655..00000000
--- a/src/talos/contracts/gmx/getters/open_positions.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import logging
-from typing import Annotated
-
-from eth_rpc.utils import to_checksum
-from eth_typing import ChecksumAddress
-from pydantic import BeforeValidator
-
-from ..constants import DATASTORE_ADDRESS
-from ..contracts import synthetics_reader
-from ..contracts.synthetics_reader import PositionProps
-from ..types import Position
-from ..utils import get_tokens_address_dict, median
-from .get import GetData
-from .prices import OraclePrices
-
-
-class GetOpenPositions(GetData):
- address: Annotated[ChecksumAddress, BeforeValidator(to_checksum)]
-
- async def get_data(self) -> dict[str, Position]: # type: ignore
- """
- Get all open positions for a given address on the chain defined in
- class init
- """
- raw_positions = await synthetics_reader.get_account_positions(DATASTORE_ADDRESS, self.address, 0, 10).get()
-
- if len(raw_positions) == 0:
- logging.info(
- 'No positions open for address: "{}"'.format(
- self.address,
- )
- )
- processed_positions = {}
-
- for raw_position in raw_positions:
- try:
- processed_position = await self._get_data_processing(raw_position)
-
- # TODO - maybe a better way of building the key?
- if processed_position.is_long:
- direction = "long"
- else:
- direction = "short"
-
- key = "{}_{}".format(processed_position.market_symbol, direction)
- processed_positions[key] = processed_position
- except KeyError as e:
- logging.error(f"Incompatible market: {e}")
-
- return processed_positions
-
- async def _get_data_processing(self, raw_position: PositionProps) -> Position: # type: ignore
- """
- A tuple containing the raw information return from the reader contract
- query GetAccountPositions
-
- Parameters
- ----------
- raw_position : tuple
- raw information return from the reader contract .
-
- Returns
- -------
- dict
- a processed dictionary containing info on the positions.
- """
- await self.markets.load_info()
- market_info = self.markets.info[to_checksum(raw_position.addresses.market)]
-
- chain_tokens = await get_tokens_address_dict()
-
- entry_price = (raw_position.numbers.size_in_usd / raw_position.numbers.size_in_tokens) / 10 ** (
- 30 - chain_tokens[to_checksum(market_info.index_token_address)].decimals
- )
-
- prices = await OraclePrices().get_recent_prices()
- mark_price = median(
- [
- float(prices[market_info.index_token_address].max_price_full),
- float(prices[market_info.index_token_address].min_price_full),
- ]
- ) / 10 ** (30 - chain_tokens[market_info.index_token_address].decimals)
-
- collateral_price = median(
- [
- float(prices[to_checksum(raw_position.addresses.collateral_token)].max_price_full),
- float(prices[to_checksum(raw_position.addresses.collateral_token)].min_price_full),
- ]
- ) / 10 ** (30 - chain_tokens[to_checksum(raw_position.addresses.collateral_token)].decimals)
-
- leverage = (raw_position.numbers.size_in_usd / 10**30) / (
- raw_position.numbers.collateral_amount * collateral_price
- / 10 ** chain_tokens[to_checksum(raw_position.addresses.collateral_token)].decimals
- )
-
- return Position(
- account=to_checksum(raw_position.addresses.account),
- market=market_info,
- market_symbol=self.markets.info[to_checksum(raw_position.addresses.market)].market_symbol,
- collateral_token=chain_tokens[to_checksum(raw_position.addresses.collateral_token)].address,
- position_size=raw_position.numbers.size_in_usd / 10**30,
- size_in_tokens=raw_position.numbers.size_in_tokens,
- entry_price=(
- (raw_position.numbers.size_in_usd / raw_position.numbers.size_in_tokens)
- / 10 ** (30 - chain_tokens[market_info.index_token_address].decimals)
- ),
- initial_collateral_amount=raw_position.numbers.collateral_amount,
- initial_collateral_amount_usd=(
- raw_position.numbers.collateral_amount * collateral_price
- / 10 ** chain_tokens[to_checksum(raw_position.addresses.collateral_token)].decimals
- ),
- leverage=leverage,
- borrowing_factor=raw_position.numbers.borrowing_factor,
- funding_fee_amount_per_size=raw_position.numbers.funding_fee_amount_per_size,
- long_token_claimable_funding_amount_per_size=raw_position.numbers.long_token_claimable_funding_amount_per_size,
- short_token_claimable_funding_amount_per_size=raw_position.numbers.short_token_claimable_funding_amount_per_size,
- position_modified_at="",
- is_long=raw_position.flags.is_long,
- percent_profit=((1 - (mark_price / entry_price)) * leverage) * 100,
- mark_price=mark_price,
- )
diff --git a/src/talos/contracts/gmx/getters/prices.py b/src/talos/contracts/gmx/getters/prices.py
deleted file mode 100644
index d40dd69b..00000000
--- a/src/talos/contracts/gmx/getters/prices.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any, ClassVar
-
-import httpx
-from eth_rpc.utils import to_checksum
-from eth_typing import ChecksumAddress
-from pydantic import BaseModel
-
-if TYPE_CHECKING:
- from ..types import OraclePriceData
-
-
-class OraclePrices(BaseModel):
- ORACLE_URL: ClassVar[str] = "https://arbitrum-api.gmxinfra.io/signed_prices/latest"
-
- @classmethod
- async def get_recent_prices(cls) -> dict[ChecksumAddress, OraclePriceData]:
- """
- Get raw output of the GMX rest v2 api for signed prices
-
- Returns
- -------
- dict
- dictionary containing raw output for each token as its keys.
-
- """
- raw_output = await cls._make_query()
- return cls._process_output(raw_output)
-
- @classmethod
- async def _make_query(cls) -> Any:
- """
- Make request using oracle url
-
- Returns
- -------
- requests.models.Response
- raw request response.
-
- """
- async with httpx.AsyncClient() as client:
- response = await client.get(cls.ORACLE_URL)
- return response.json()
-
- @classmethod
- def _process_output(cls, output: Any) -> dict[ChecksumAddress, OraclePriceData]:
- """
- Take the API response and create a new dictionary where the index token
- addresses are the keys
-
- Parameters
- ----------
- output : dict
- Dictionary of rest API repsonse.
-
- Returns
- -------
- processed : TYPE
- DESCRIPTION.
- """
- from ..types import OraclePriceData
-
- processed = {}
- for i in output["signedPrices"]:
- processed[to_checksum(i["tokenAddress"])] = OraclePriceData.model_validate(i)
-
- return processed
diff --git a/src/talos/contracts/gmx/order/__init__.py b/src/talos/contracts/gmx/order/__init__.py
deleted file mode 100644
index 38d76ed4..00000000
--- a/src/talos/contracts/gmx/order/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .order import Order
-from .executor import OrderExecutor
-
-__all__ = ['Order', 'OrderExecutor']
diff --git a/src/talos/contracts/gmx/order/executor.py b/src/talos/contracts/gmx/order/executor.py
deleted file mode 100644
index c1669ce2..00000000
--- a/src/talos/contracts/gmx/order/executor.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from eth_typing import HexStr, HexAddress
-
-from ..utils.gas import get_gas_limits
-from .order import Order
-from ..types.orders import OrderType
-
-
-class OrderExecutor(Order):
- """
- Open a buy order
- Extends base Order class
- """
-
- async def execute(self, order_type: OrderType = OrderType.MarketIncrease) -> HexStr:
- await self.determine_gas_limits()
- return await self.execute_order(order_type=order_type)
-
- async def determine_gas_limits(self) -> None:
- self._gas_limits = await get_gas_limits()
- self._gas_limits_order_type = self._gas_limits.increase_order
-
- @classmethod
- async def calculate_initial_collateral_tokens(cls, size_delta_usd: float, leverage: int, start_token_address: HexAddress):
- """
- Calculate the amount of tokens collateral from the USD value
- """
- from ..getters.prices import OraclePrices
- from ..utils import median, get_tokens_address_dict
-
- collateral_usd = size_delta_usd / leverage
-
- prices = await OraclePrices().get_recent_prices()
- price = median(
- [
- float(prices[start_token_address].max_price_full),
- float(prices[start_token_address].min_price_full),
- ]
- )
-
- address_dict = await get_tokens_address_dict()
- oracle_factor = address_dict[start_token_address].decimals - 30
-
- amount = collateral_usd / (price * 10**oracle_factor)
-
- decimal = address_dict[start_token_address].decimals
- scaled_amount = int(amount * 10**decimal)
-
- return scaled_amount
-
- async def get_price(self, token_address: HexAddress) -> float:
- from ..getters.prices import OraclePrices
- from ..utils import median
-
- prices = await OraclePrices().get_recent_prices()
- return median([float(prices[token_address].max_price_full), float(prices[token_address].min_price_full)])
diff --git a/src/talos/contracts/gmx/order/order.py b/src/talos/contracts/gmx/order/order.py
deleted file mode 100644
index c598e52f..00000000
--- a/src/talos/contracts/gmx/order/order.py
+++ /dev/null
@@ -1,464 +0,0 @@
-import logging
-from typing import Optional
-
-from eth_rpc import Block, PrivateKeyWallet
-from eth_rpc.networks import Arbitrum
-from eth_rpc.types import primitives
-from eth_rpc.utils import to_checksum
-from eth_typing import ChecksumAddress, HexAddress, HexStr
-from hexbytes import HexBytes
-from pydantic import BaseModel, Field, PrivateAttr
-
-from ..constants import (
- DATASTORE_ADDRESS,
- ORDER_VAULT_ADDRESS,
- PRECISION,
- SYNTHETICS_ROUTER_CONTRACT_ADDRESS,
- USDC_ADDRESS,
- WETH_ADDRESS,
-)
-from ..contracts.exchange_router import (
- CreateOrderParams,
- CreateOrderParamsAddresses,
- CreateOrderParamsNumbers,
- ExchangeRouter,
- exchange_router,
-)
-from ..contracts.synthetics_reader import ExecutionPriceParams, PriceProps
-from ..getters.markets import Markets
-from ..getters.prices import OraclePrices
-from ..types import DecreasePositionSwapType, Market, OraclePriceData, OrderType
-from ..types.gas_limits import GasLimits
-from ..utils.approval import check_if_approved
-from ..utils.gas import get_execution_fee
-from ..utils.price import get_execution_price_and_price_impact
-from ..utils import median
-
-
-class Order(BaseModel):
- wallet: PrivateKeyWallet
- market_key: ChecksumAddress
- collateral_address: ChecksumAddress
- index_token_address: ChecksumAddress
- is_long: bool
- size_delta: int
- initial_collateral_delta: int
- slippage_percent: float = Field(default=0.003)
- swap_path: list[HexAddress] = Field(default_factory=list)
- max_fee_per_gas: int | None = Field(default=None)
- auto_cancel: bool = Field(default=False)
- debug_mode: bool = Field(default=False)
- execution_buffer: float = Field(default=1.3)
- markets: Markets = Field(default_factory=Markets)
-
- _exchange_router: ExchangeRouter = PrivateAttr(default=exchange_router)
- _is_swap: bool = PrivateAttr(default=False)
- _gas_limits: Optional[GasLimits] = PrivateAttr(default=None)
- _gas_limits_order_type: primitives.uint256 | None = PrivateAttr(default=None)
-
- async def get_block_fee(self) -> None:
- if self.max_fee_per_gas is None:
- block = await Block[Arbitrum].latest()
- assert block.base_fee_per_gas is not None
- self.max_fee_per_gas = int(block.base_fee_per_gas * 1.35)
-
- async def estimated_swap_output(self, market: Market, in_token: HexAddress, in_token_amount: int) -> dict: # type: ignore
- raise NotImplementedError()
-
- async def determine_gas_limits(self) -> None:
- pass
-
- async def check_for_approval(self) -> None:
- """
- Check for Approval
- """
- await check_if_approved(
- self.wallet,
- SYNTHETICS_ROUTER_CONTRACT_ADDRESS,
- self.collateral_address,
- self.initial_collateral_delta,
- approve=True,
- )
-
- async def _submit_transaction(
- self,
- wallet: PrivateKeyWallet,
- value_amount: float,
- multicall_args: list[bytes],
- ) -> HexStr:
- """
- Submit Transaction
- """
- logging.info("Building transaction...")
-
- tx_hash: HexStr = await self._exchange_router.multicall(multicall_args).execute(wallet, value=value_amount)
-
- logging.info("Txn submitted!")
- logging.info("Check status: https://arbiscan.io/tx/0x{}".format(tx_hash))
- logging.info("Transaction submitted!")
-
- return tx_hash
-
- def _get_prices(
- self,
- decimals: float,
- prices: dict[ChecksumAddress, OraclePriceData],
- is_open: bool = False,
- is_close: bool = False,
- ) -> tuple[float, int, float]:
- """
- Get Prices
- """
- logging.info("Getting prices...")
- price = median(
- [
- float(prices[self.index_token_address].max_price_full),
- float(prices[self.index_token_address].min_price_full),
- ]
- )
-
- # Depending on if open/close & long/short, we need to account for
- # slippage in a different way
- if is_open:
- if self.is_long:
- slippage = int(float(price) + float(price) * self.slippage_percent)
- else:
- slippage = int(float(price) - float(price) * self.slippage_percent)
- elif is_close:
- if self.is_long:
- slippage = int(float(price) - float(price) * self.slippage_percent)
- else:
- slippage = int(float(price) + float(price) * self.slippage_percent)
- else:
- slippage = 0
-
- acceptable_price_in_usd = slippage * 10 ** (decimals - PRECISION)
-
- logging.info("Mark Price: ${:.4f}".format(float(price * 10 ** (decimals - PRECISION))))
-
- if acceptable_price_in_usd != 0:
- logging.info("Acceptable price: ${:.4f}".format(acceptable_price_in_usd))
-
- return float(price), int(slippage), acceptable_price_in_usd
-
- async def execute_order(self, order_type: OrderType = OrderType.MarketIncrease) -> HexStr:
- """
- Create Order
- """
- is_close = order_type == OrderType.MarketDecrease
- is_open = order_type == OrderType.MarketIncrease
- is_swap = order_type == OrderType.MarketSwap
-
- # Prepare order execution
- execution_fee, market_info, prices, size_delta_price_price_impact = await self._prepare_order_execution(
- is_close
- )
-
- # Determine order type and handle swap-specific logic
- order_type, min_output_amount = await self._determine_order_type_and_swap_logic(
- is_open, is_close, is_swap, market_info, size_delta_price_price_impact
- )
-
- # Calculate prices and validate execution
- (
- price,
- acceptable_price,
- acceptable_price_in_usd,
- mark_price,
- gmx_market_address,
- ) = await self._calculate_and_validate_prices(
- is_open, is_close, is_swap, market_info, prices, size_delta_price_price_impact
- )
-
- # Create order parameters
- arguments = self._create_order_parameters(
- order_type, execution_fee, min_output_amount, mark_price, acceptable_price, gmx_market_address
- )
-
- # Build and submit transaction
- value_amount, multicall_args = self._build_transaction_arguments(
- is_open, is_close, is_swap, execution_fee, arguments
- )
-
- return await self._submit_transaction(
- self.wallet,
- value_amount,
- multicall_args, # type: ignore
- )
-
- async def _prepare_order_execution(
- self, is_close: bool
- ) -> tuple[
- primitives.uint256,
- dict[ChecksumAddress, Market],
- dict[ChecksumAddress, OraclePriceData],
- primitives.int256,
- ]:
- """
- Prepare order execution by getting fees, checking approvals, and loading market data
- """
- execution_fee = await self._get_execution_fee()
-
- # Don't need to check approval when closing
- if not is_close and not self.debug_mode and self.collateral_address != WETH_ADDRESS:
- await self.check_for_approval()
-
- await self.markets.load_info()
-
- market_info = self.markets.info
- prices = await OraclePrices().get_recent_prices()
- size_delta_price_price_impact = self.size_delta
-
- # when decreasing size delta must be negative
- if is_close:
- size_delta_price_price_impact = size_delta_price_price_impact * -1
-
- return execution_fee, market_info, prices, primitives.int256(size_delta_price_price_impact)
-
- async def _determine_order_type_and_swap_logic(
- self,
- is_open: bool,
- is_close: bool,
- is_swap: bool,
- market_info: dict[ChecksumAddress, Market],
- size_delta_price_price_impact: int,
- ) -> tuple[OrderType, int]:
- """
- Determine order type and handle swap-specific logic
- """
- min_output_amount = 0
-
- if is_open:
- order_type = OrderType.MarketIncrease
- elif is_close:
- order_type = OrderType.MarketDecrease
- elif is_swap:
- order_type = OrderType.MarketSwap
-
- # Estimate amount of token out using a reader function, necessary
- # for multi swap
- estimated_output = await self.estimated_swap_output(
- market_info[to_checksum(self.swap_path[0])], self.collateral_address, self.initial_collateral_delta
- )
-
- # this var will help to calculate the cost gas depending on the
- # operation
- assert self._gas_limits is not None
-
- self._get_limits_order_type = self._gas_limits.single_swap
- if len(self.swap_path) > 1:
- estimated_output = await self.estimated_swap_output(
- market_info[to_checksum(self.swap_path[1])],
- USDC_ADDRESS,
- int(
- estimated_output["out_token_amount"]
- - estimated_output["out_token_amount"] * self.slippage_percent
- ),
- )
- self._get_limits_order_type = self._gas_limits.swap_order
-
- min_output_amount = (
- estimated_output["out_token_amount"] - estimated_output["out_token_amount"] * self.slippage_percent
- )
-
- return order_type, min_output_amount
-
- async def _calculate_and_validate_prices(
- self,
- is_open: bool,
- is_close: bool,
- is_swap: bool,
- market_info: dict[ChecksumAddress, Market],
- prices: dict[ChecksumAddress, OraclePriceData],
- size_delta_price_price_impact: int,
- ) -> tuple[float, int, float, int, HexAddress]:
- """
- Calculate prices and validate execution price
- """
- # Create execution price parameters
- execution_price_parameters = ExecutionPriceParams(
- data_store=DATASTORE_ADDRESS,
- market_key=self.market_key,
- index_token_price=PriceProps(
- min=primitives.uint256(int(prices[self.index_token_address].min_price_full)),
- max=primitives.uint256(int(prices[self.index_token_address].max_price_full)),
- ),
- position_size_in_usd=primitives.uint256(0),
- position_size_in_tokens=primitives.uint256(0),
- size_delta_usd=primitives.int256(size_delta_price_price_impact),
- is_long=self.is_long,
- )
- decimals = market_info[self.market_key].market_metadata.decimals
-
- price, acceptable_price, acceptable_price_in_usd = self._get_prices(
- decimals,
- prices,
- is_open,
- is_close,
- )
-
- mark_price = 0
- # mark price should be actual price when opening
- if is_open:
- mark_price = int(price)
-
- gmx_market_address = to_checksum(self.market_key)
- # Market address and acceptable price not important for swap
- if is_swap:
- acceptable_price = 0
- gmx_market_address = to_checksum("0x0000000000000000000000000000000000000000")
-
- # Get execution price and validate
- execution_price_and_price_impact_dict = await get_execution_price_and_price_impact(
- execution_price_parameters,
- decimals,
- )
- logging.info("Execution price: ${:.4f}".format(execution_price_and_price_impact_dict["execution_price"]))
-
- # Validate execution price
- self._validate_execution_price(
- is_open, is_close, execution_price_and_price_impact_dict, acceptable_price_in_usd
- )
-
- return price, acceptable_price, acceptable_price_in_usd, mark_price, gmx_market_address
-
- def _validate_execution_price(
- self, is_open: bool, is_close: bool, execution_price_dict: dict[str, float], acceptable_price_in_usd: float
- ) -> None:
- """
- Validate that execution price falls within acceptable range
- """
- execution_price = execution_price_dict["execution_price"]
-
- if is_open:
- self._validate_open_position_price(execution_price, acceptable_price_in_usd)
- elif is_close:
- self._validate_close_position_price(execution_price, acceptable_price_in_usd)
-
- def _validate_open_position_price(self, execution_price: float, acceptable_price_in_usd: float) -> None:
- """Validate execution price for opening positions"""
- if self.is_long and execution_price > acceptable_price_in_usd:
- raise Exception("Execution price falls outside acceptable price!")
- elif not self.is_long and execution_price < acceptable_price_in_usd:
- raise Exception("Execution price falls outside acceptable price!")
-
- def _validate_close_position_price(self, execution_price: float, acceptable_price_in_usd: float) -> None:
- """Validate execution price for closing positions"""
- if self.is_long and execution_price < acceptable_price_in_usd:
- raise Exception("Execution price falls outside acceptable price!")
- elif not self.is_long and execution_price > acceptable_price_in_usd:
- raise Exception("Execution price falls outside acceptable price!")
-
- def _create_order_parameters(
- self,
- order_type: OrderType,
- execution_fee: int,
- min_output_amount: int,
- mark_price: int,
- acceptable_price: int,
- gmx_market_address: HexAddress,
- ) -> CreateOrderParams:
- """
- Create order parameters
- """
- decrease_position_swap_type = DecreasePositionSwapType.NoSwap
- should_unwrap_native_token = True
- referral_code = HexBytes("0x0000000000000000000000000000000000000000000000000000000000000000")
- user_wallet_address = to_checksum(self.wallet.address)
- eth_zero_address = to_checksum(HexAddress(HexStr("0x0000000000000000000000000000000000000000")))
- ui_ref_address = to_checksum(HexAddress(HexStr("0x0000000000000000000000000000000000000000")))
- collateral_address = to_checksum(self.collateral_address)
-
- return CreateOrderParams(
- addresses=CreateOrderParamsAddresses(
- receiver=user_wallet_address,
- cancellation_receiver=user_wallet_address,
- callback_contract=eth_zero_address,
- ui_fee_receiver=ui_ref_address,
- market=gmx_market_address,
- initial_collateral_token=collateral_address,
- swap_path=self.swap_path,
- ),
- numbers=CreateOrderParamsNumbers(
- size_delta_usd=primitives.uint256(self.size_delta),
- initial_collateral_delta_amount=primitives.uint256(self.initial_collateral_delta),
- trigger_price=primitives.uint256(mark_price),
- acceptable_price=primitives.uint256(acceptable_price),
- execution_fee=primitives.uint256(execution_fee),
- callback_gas_limit=primitives.uint256(0),
- min_output_amount=primitives.uint256(min_output_amount),
- valid_from_time=primitives.uint256(0),
- ),
- order_type=primitives.uint8(order_type),
- decrease_position_swap_type=primitives.uint8(decrease_position_swap_type),
- is_long=self.is_long,
- should_unwrap_native_token=should_unwrap_native_token,
- auto_cancel=self.auto_cancel,
- referral_code=primitives.bytes32(referral_code),
- data_list=[],
- )
-
- def _build_transaction_arguments(
- self,
- is_open: bool,
- is_close: bool,
- is_swap: bool,
- execution_fee: primitives.uint256,
- arguments: CreateOrderParams,
- ) -> tuple[primitives.uint256, list[HexBytes]]:
- """
- Build multicall transaction arguments
- """
- value_amount = execution_fee
- initial_collateral_delta_amount = self.initial_collateral_delta
-
- if self.collateral_address != WETH_ADDRESS and not is_close:
- multicall_args = [
- HexBytes(self._send_wnt(value_amount)),
- HexBytes(self._send_tokens(primitives.uint256(initial_collateral_delta_amount))),
- HexBytes(self._create_order(arguments)),
- ]
- else:
- # send start token and execute fee if token is ETH or AVAX
- if is_open or is_swap:
- value_amount = primitives.uint256(initial_collateral_delta_amount + execution_fee)
-
- multicall_args = [HexBytes(self._send_wnt(value_amount)), HexBytes(self._create_order(arguments))]
-
- return value_amount, multicall_args
-
- def _create_order(self, params: CreateOrderParams) -> primitives.bytes32:
- """
- Create Order
- """
- return primitives.bytes32(bytes.fromhex(self._exchange_router.create_order(params).data[2:]))
-
- def _send_tokens(self, amount: primitives.uint256) -> primitives.bytes32:
- """
- Send tokens
- """
- return primitives.bytes32(
- bytes.fromhex(
- self._exchange_router.send_tokens(
- (self.collateral_address, ORDER_VAULT_ADDRESS, amount),
- ).data[2:]
- )
- )
-
- def _send_wnt(self, amount: primitives.uint256) -> primitives.bytes32:
- """
- Send WNT
- """
- return primitives.bytes32(bytes.fromhex(self._exchange_router.send_wnt((ORDER_VAULT_ADDRESS, amount)).data[2:]))
-
- async def _get_execution_fee(self) -> primitives.uint256:
- await self.determine_gas_limits()
- block = await Block[Arbitrum].latest()
- gas_price = block.base_fee_per_gas
- assert self._gas_limits is not None
- assert gas_price is not None
-
- assert self._gas_limits_order_type is not None
- execution_fee = get_execution_fee(self._gas_limits, self._gas_limits_order_type, gas_price)
-
- return primitives.uint256(int(execution_fee * self.execution_buffer))
diff --git a/src/talos/contracts/gmx/types/__init__.py b/src/talos/contracts/gmx/types/__init__.py
deleted file mode 100644
index d0876077..00000000
--- a/src/talos/contracts/gmx/types/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from .gas_limits import GasLimits
-from .markets import Market
-from .orders import DecreasePositionSwapType, OrderType
-from .position import Position
-from .prices import OraclePriceData
-from .tokens import TokenMetadata
-
-__all__ = [
- "GasLimits",
- "OrderType",
- "DecreasePositionSwapType",
- "Position",
- "Market",
- "OraclePriceData",
- "TokenMetadata",
-]
diff --git a/src/talos/contracts/gmx/types/gas_limits.py b/src/talos/contracts/gmx/types/gas_limits.py
deleted file mode 100644
index 4acce68c..00000000
--- a/src/talos/contracts/gmx/types/gas_limits.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from pydantic import BaseModel
-from eth_rpc.types import primitives
-
-
-class GasLimits(BaseModel):
- deposit: primitives.uint256
- withdraw: primitives.uint256
- single_swap: primitives.uint256
- swap_order: primitives.uint256
- increase_order: primitives.uint256
- decrease_order: primitives.uint256
- estimated_fee_base_gas_limit: primitives.uint256
- estimated_fee_multiplier_factor: primitives.uint256
diff --git a/src/talos/contracts/gmx/types/markets.py b/src/talos/contracts/gmx/types/markets.py
deleted file mode 100644
index 8ea48c67..00000000
--- a/src/talos/contracts/gmx/types/markets.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from eth_typing import ChecksumAddress
-from pydantic import BaseModel, Field
-
-from .tokens import TokenMetadata
-
-
-class MarketMetadata(BaseModel):
- symbol: str
- synthetic: bool = Field(default=False)
- decimals: int = 18
-
-
-class LongTokenMetadata(BaseModel):
- symbol: str
- decimals: int
-
-
-class Market(BaseModel):
- address: ChecksumAddress
- market_symbol: str
- index_token_address: ChecksumAddress
- market_metadata: TokenMetadata | MarketMetadata
- long_token_metadata: TokenMetadata
- long_token_address: ChecksumAddress
- short_token_metadata: TokenMetadata
- short_token_address: ChecksumAddress
diff --git a/src/talos/contracts/gmx/types/orders.py b/src/talos/contracts/gmx/types/orders.py
deleted file mode 100644
index bf842de7..00000000
--- a/src/talos/contracts/gmx/types/orders.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from enum import IntEnum
-
-
-class OrderType(IntEnum):
- MarketSwap = 0
- LimitSwap = 1
- MarketIncrease = 2
- LimitIncrease = 3
- MarketDecrease = 4
- LimitDecrease = 5
- StopLossDecrease = 6
- Liquidation = 7
-
-
-class DecreasePositionSwapType(IntEnum):
- NoSwap = 0
- SwapPnlTokenToCollateralToken = 1
- SwapCollateralTokenToPnlToken = 2
diff --git a/src/talos/contracts/gmx/types/position.py b/src/talos/contracts/gmx/types/position.py
deleted file mode 100644
index 86bcf8b2..00000000
--- a/src/talos/contracts/gmx/types/position.py
+++ /dev/null
@@ -1,172 +0,0 @@
-from __future__ import annotations
-
-from eth_rpc import PrivateKeyWallet
-from eth_rpc.types import primitives
-from eth_typing import ChecksumAddress, HexAddress, HexStr
-from pydantic import BaseModel
-
-from ..constants import WETH_ADDRESS
-from ..contracts import datastore, exchange_router
-from ..getters.prices import OraclePrices
-from ..types.orders import OrderType
-from ..utils.keys import claimable_funding_amount_key
-from ..utils.tokens import get_tokens_address_dict
-from .markets import Market
-
-
-class Position(BaseModel):
- """
- Get all open positions for a given address on the chain defined in
- class init
-
- Parameters
- ----------
- address : HexAddress
- The address of the account to get the positions for
-
- Returns
- -------
- list[Position]
- A list of Position objects
-
- > positions = await Position.get_positions(address)
- > await positions[0].update_leverage(wallet, 1)
-
- """
-
- account: HexAddress
- market: Market
- market_symbol: str
- collateral_token: ChecksumAddress
- position_size: float
- size_in_tokens: int
- entry_price: float
- initial_collateral_amount: int
- initial_collateral_amount_usd: float
- leverage: float
- borrowing_factor: float
- funding_fee_amount_per_size: float
- long_token_claimable_funding_amount_per_size: float
- short_token_claimable_funding_amount_per_size: float
- position_modified_at: str
- is_long: bool
- percent_profit: float
- mark_price: float
-
- @classmethod
- async def get_positions(cls, address: ChecksumAddress) -> list[Position]:
- from ..getters.open_positions import GetOpenPositions
-
- positions = await GetOpenPositions(address=address).get_data()
- return list(positions.values())
-
- async def claim_market_fees(self, wallet: PrivateKeyWallet) -> HexStr | None:
- claimable_fee = await self.get_claimable_funding_fees()
- if claimable_fee > 0:
- tx_hash: HexStr = await exchange_router.claim_funding_fees(
- ([self.market.address], [self.collateral_token], [wallet.address])
- ).execute(wallet)
- return tx_hash
- return None
-
- async def get_claimable_funding_fees(self) -> primitives.uint256:
- claimable_fee = claimable_funding_amount_key(self.market.address, self.market.short_token_address, self.account)
- return primitives.uint256(await datastore.get_uint(claimable_fee).get())
-
- async def update_leverage(
- self,
- wallet: PrivateKeyWallet,
- new_leverage: float,
- additional_collateral: int = 0,
- slippage_percent: float = 0.003,
- swap_path: list[HexAddress] = [],
- ) -> HexStr | None:
- from ..order.executor import OrderExecutor
-
- position_size = self.position_size
-
- new_size: float = position_size * new_leverage / self.leverage
- size_delta = int(abs(new_size - self.position_size) * 1e30)
-
- if position_size < new_size:
- order_type = OrderType.MarketIncrease
- else:
- order_type = OrderType.MarketDecrease
-
- order = OrderExecutor(
- wallet=wallet,
- market_key=self.market.address,
- collateral_address=self.collateral_token,
- index_token_address=self.market.index_token_address,
- is_long=self.is_long,
- size_delta=size_delta,
- initial_collateral_delta=additional_collateral,
- slippage_percent=slippage_percent,
- swap_path=swap_path,
- execution_buffer=1.5,
- debug_mode=True,
- )
-
- tx_hash = await order.execute(order_type=order_type)
- return tx_hash
-
- async def close(
- self, wallet: PrivateKeyWallet, slippage_percent: float = 0.003, swap_path: list[HexAddress] = []
- ) -> HexStr | None:
- from ..order.executor import OrderExecutor
-
- order = OrderExecutor(
- wallet=wallet,
- market_key=self.market.address,
- collateral_address=self.collateral_token,
- index_token_address=self.market.index_token_address,
- is_long=self.is_long,
- size_delta=int(self.position_size * 10**30),
- initial_collateral_delta=self.size_in_tokens,
- slippage_percent=slippage_percent,
- swap_path=swap_path,
- execution_buffer=1.5,
- )
-
- tx_hash = await order.execute(order_type=OrderType.MarketDecrease)
- return tx_hash
-
- @classmethod
- async def create_position(
- cls,
- wallet: PrivateKeyWallet,
- market: Market,
- is_long: bool,
- size_in_usd: int,
- collateral_amount: int,
- slippage_percent: float = 0.003,
- swap_path: list[HexAddress] = [],
- collateral_address: ChecksumAddress = WETH_ADDRESS,
- ) -> HexStr | None:
- from ..order.executor import OrderExecutor
-
- prices = await OraclePrices().get_recent_prices()
- price = prices[collateral_address].max_price_full
- tokens = await get_tokens_address_dict()
-
- token_collateral_amount: float = float(collateral_amount / price)
- decimal: int = tokens[collateral_address].decimals
- oracle_factor: int = 12
- scaled_amount: int = int(token_collateral_amount * 10 ** (decimal + oracle_factor))
-
- order = OrderExecutor(
- wallet=wallet,
- market_key=market.address,
- collateral_address=collateral_address,
- index_token_address=market.index_token_address,
- is_long=is_long,
- size_delta=int(size_in_usd * 10**30), # 10**30
- initial_collateral_delta=scaled_amount, # amount of collateral to send
- slippage_percent=slippage_percent,
- swap_path=swap_path,
- execution_buffer=1.5,
- debug_mode=True,
- )
-
- tx_hash = await order.execute()
- return tx_hash
diff --git a/src/talos/contracts/gmx/types/prices.py b/src/talos/contracts/gmx/types/prices.py
deleted file mode 100644
index 1374c278..00000000
--- a/src/talos/contracts/gmx/types/prices.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from datetime import datetime
-from typing import Optional
-
-from pydantic import BaseModel, Field
-
-
-class OraclePriceData(BaseModel):
- id: str
- min_block_number: Optional[int] = Field(None, alias="minBlockNumber")
- min_block_hash: Optional[str] = Field(None, alias="minBlockHash")
- oracle_decimals: Optional[int] = Field(None, alias="oracleDecimals")
- token_symbol: str = Field(alias="tokenSymbol")
- token_address: str = Field(alias="tokenAddress")
- min_price: Optional[float] = Field(None, alias="minPrice")
- max_price: Optional[float] = Field(None, alias="maxPrice")
- signer: Optional[str] = None
- signature: Optional[str] = None
- signature_without_block_hash: Optional[str] = Field(None, alias="signatureWithoutBlockHash")
- created_at: datetime = Field(alias="createdAt")
- min_block_timestamp: int = Field(alias="minBlockTimestamp")
- oracle_keeper_key: str = Field(alias="oracleKeeperKey")
- max_block_timestamp: int = Field(alias="maxBlockTimestamp")
- max_block_number: Optional[int] = Field(None, alias="maxBlockNumber")
- max_block_hash: Optional[str] = Field(None, alias="maxBlockHash")
- max_price_full: float = Field(alias="maxPriceFull")
- min_price_full: float = Field(alias="minPriceFull")
- oracle_keeper_record_id: Optional[str] = Field(None, alias="oracleKeeperRecordId")
- oracle_keeper_fetch_type: str = Field(alias="oracleKeeperFetchType")
- oracle_type: str = Field(alias="oracleType")
- blob: str
- is_valid: bool = Field(alias="isValid")
- invalid_reason: Optional[str] = Field(None, alias="invalidReason")
-
- class Config:
- populate_by_name = True
diff --git a/src/talos/contracts/gmx/types/tokens.py b/src/talos/contracts/gmx/types/tokens.py
deleted file mode 100644
index 255428cd..00000000
--- a/src/talos/contracts/gmx/types/tokens.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from eth_typing import HexAddress
-from pydantic import BaseModel, Field
-
-
-class TokenMetadata(BaseModel):
- symbol: str
- address: HexAddress
- decimals: int
- synthetic: bool = Field(default=False)
diff --git a/src/talos/contracts/gmx/utils/__init__.py b/src/talos/contracts/gmx/utils/__init__.py
deleted file mode 100644
index a000f22f..00000000
--- a/src/talos/contracts/gmx/utils/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .math import median
-from .numerize import numerize
-from .swap import determine_swap_route
-from .tokens import get_tokens_address_dict
-
-__all__ = ['get_tokens_address_dict', 'determine_swap_route', 'median', 'numerize']
diff --git a/src/talos/contracts/gmx/utils/approval.py b/src/talos/contracts/gmx/utils/approval.py
deleted file mode 100644
index 43d1eebe..00000000
--- a/src/talos/contracts/gmx/utils/approval.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from eth_rpc import PrivateKeyWallet
-from eth_rpc.networks import Arbitrum
-from eth_rpc.utils import to_checksum
-from eth_typeshed.erc20 import ERC20, ApproveRequest, OwnerSpenderRequest
-from eth_typing import HexAddress, HexStr
-
-
-async def check_if_approved(
- wallet: PrivateKeyWallet,
- spender: HexAddress,
- token_to_approve: HexAddress,
- amount_of_tokens_to_spend: int,
- approve: bool,
-):
- if token_to_approve == HexAddress(HexStr("0x47904963fc8b2340414262125aF798B9655E58Cd")):
- token_to_approve = HexAddress(HexStr("0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f"))
-
- spender_checksum_address = to_checksum(spender)
-
- # User wallet address will be taken from config file
- user_checksum_address = to_checksum(wallet.address)
- token_checksum_address = to_checksum(token_to_approve)
-
- token = ERC20[Arbitrum](address=token_to_approve)
-
- balance_of = await token.balance_of(user_checksum_address).get()
-
- if balance_of < amount_of_tokens_to_spend:
- raise Exception("Insufficient balance")
-
- amount_approved = await token.allowance(
- OwnerSpenderRequest(owner=user_checksum_address, spender=spender_checksum_address)
- ).get()
-
- print("Checking coins for approval..")
- if amount_approved < amount_of_tokens_to_spend and approve:
- print(
- 'Approving contract "{}" to spend {} tokens belonging to token address: {}'.format(
- spender_checksum_address, amount_of_tokens_to_spend, token_checksum_address
- )
- )
-
- tx_hash = token.approve(
- ApproveRequest(spender=spender_checksum_address, amount=amount_of_tokens_to_spend)
- ).execute(wallet)
-
- print("Txn submitted!")
- print("Check status: https://arbiscan.io/tx/{}".format(tx_hash.hex()))
-
- if amount_approved < amount_of_tokens_to_spend and not approve:
- raise Exception("Token not approved for spend, please allow first!")
-
- print(
- 'Contract "{}" approved to spend {} tokens belonging to token address: {}'.format(
- spender_checksum_address, amount_of_tokens_to_spend, token_checksum_address
- )
- )
- print("Coins Approved for spend!")
diff --git a/src/talos/contracts/gmx/utils/datastore.py b/src/talos/contracts/gmx/utils/datastore.py
deleted file mode 100644
index ab2f2ba6..00000000
--- a/src/talos/contracts/gmx/utils/datastore.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import json
-import os
-from datetime import datetime
-from pathlib import Path
-from typing import Any
-
-import pandas as pd
-from dotenv import load_dotenv
-
-load_dotenv()
-package_dir = Path(__file__).parent.parent.parent
-
-
-def save_json_file_to_datastore(filename: str, data: dict[str, Any]) -> None:
- """
- Save a dictionary as json file to the datastore directory
- """
-
- filepath = os.path.join(package_dir, "data_store", filename)
-
- with open(filepath, "w") as f:
- json.dump(data, f)
-
-
-def save_csv_to_datastore(filename: str, dataframe: pd.DataFrame) -> None:
- """
- For a given filename, save pandas dataframe as a csv to datastore
- """
-
- archive_filepath = os.path.join(package_dir, "data_store", filename)
-
- if os.path.exists(archive_filepath):
- archive = pd.read_csv(archive_filepath)
-
- dataframe = pd.concat([archive, dataframe])
-
- dataframe.to_csv(os.path.join(package_dir, "data_store", filename), index=False)
-
-
-def make_timestamped_dataframe(
- data: list[dict[str, Any]] | dict[str, Any],
-) -> pd.DataFrame:
- """
- Add a new column to a given dataframe with a column for timestamp
-
- Parameters
- ----------
- data : pd.DataFrame
- dataframe to add timestamp column to.
-
- """
- dataframe = pd.DataFrame(data, index=[0])
- dataframe["timestamp"] = datetime.now()
-
- return dataframe
diff --git a/src/talos/contracts/gmx/utils/funding.py b/src/talos/contracts/gmx/utils/funding.py
deleted file mode 100644
index bfbd9947..00000000
--- a/src/talos/contracts/gmx/utils/funding.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from ..contracts.synthetics_reader.types import ReaderUtilsMarketInfo
-
-
-def apply_factor(value: int, factor: int):
- return value * factor / 10**30
-
-
-def get_funding_factor_per_period(
- market_info: ReaderUtilsMarketInfo,
- is_long: bool,
- period_in_seconds: int,
- long_interest_usd: int,
- short_interest_usd: int,
-) -> int:
- """
- For a given market, calculate the funding factor for a given period
- """
-
- funding_factor_per_second = market_info.next_funding.funding_factor_per_second * 10**-28
-
- long_pays_shorts = market_info.next_funding.longs_pay_shorts
-
- if is_long:
- is_larger_side = long_pays_shorts
- else:
- is_larger_side = not long_pays_shorts
-
- if is_larger_side:
- factor_per_second = funding_factor_per_second * -1
- else:
- if long_pays_shorts:
- larger_interest_usd = long_interest_usd
- smaller_interest_usd = short_interest_usd
-
- else:
- larger_interest_usd = short_interest_usd
- smaller_interest_usd = long_interest_usd
-
- if smaller_interest_usd > 0:
- ratio = larger_interest_usd * 10**30 / smaller_interest_usd
-
- else:
- ratio = 0
-
- factor_per_second = apply_factor(ratio, funding_factor_per_second)
-
- return factor_per_second * period_in_seconds
diff --git a/src/talos/contracts/gmx/utils/gas.py b/src/talos/contracts/gmx/utils/gas.py
deleted file mode 100644
index 4c6c7dc4..00000000
--- a/src/talos/contracts/gmx/utils/gas.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from eth_rpc.networks import Arbitrum
-from eth_typeshed.multicall import make_multicall
-
-from ..contracts.datastore import datastore
-from ..types.gas_limits import GasLimits
-from .funding import apply_factor
-from .keys import (
- decrease_order_gas_limit_key,
- deposit_gas_limit_key,
- execution_gas_fee_base_amount_key,
- execution_gas_fee_multiplier_key,
- increase_order_gas_limit_key,
- single_swap_gas_limit_key,
- swap_order_gas_limit_key,
- withdraw_gas_limit_key,
-)
-
-
-def get_execution_fee(gas_limits: GasLimits, estimated_gas_limit: int, gas_price: int) -> int:
- """
- Given a dictionary of gas_limits, the uncalled datastore object of a given operation, and the
- latest gas price, calculate the minimum execution fee required to perform an action
-
- Parameters
- ----------
- gas_limits : dict
- dictionary of uncalled datastore limit obkects.
- estimated_gas_limit : datastore_object
- the uncalled datastore object specific to operation that will be undertaken.
- gas_price : int
- latest gas price.
-
- """
-
- base_gas_limit = gas_limits.estimated_fee_base_gas_limit
- multiplier_factor = gas_limits.estimated_fee_multiplier_factor
- adjusted_gas_limit = base_gas_limit + apply_factor(estimated_gas_limit, multiplier_factor)
-
- return int(adjusted_gas_limit * gas_price)
-
-
-async def get_gas_limits() -> GasLimits:
- """
- Given a Web3 contract object of the datstore, return a dictionary with the uncalled gas limits
- that correspond to various operations that will require the execution fee to calculated for.
-
- Parameters
- ----------
- datastore_object : web3 object
- contract connection.
-
- """
- multicall = make_multicall(Arbitrum)
-
- calls = [
- datastore.get_uint(deposit_gas_limit_key()),
- datastore.get_uint(withdraw_gas_limit_key()),
- datastore.get_uint(single_swap_gas_limit_key()),
- datastore.get_uint(swap_order_gas_limit_key()),
- datastore.get_uint(increase_order_gas_limit_key()),
- datastore.get_uint(decrease_order_gas_limit_key()),
- datastore.get_uint(execution_gas_fee_base_amount_key()),
- datastore.get_uint(execution_gas_fee_multiplier_key()),
- ]
-
- results = await multicall.execute(*calls)
-
- gas_limits = GasLimits(
- deposit=results[0],
- withdraw=results[1],
- single_swap=results[2],
- swap_order=results[3],
- increase_order=results[4],
- decrease_order=results[5],
- estimated_fee_base_gas_limit=results[6],
- estimated_fee_multiplier_factor=results[7],
- )
-
- return gas_limits
diff --git a/src/talos/contracts/gmx/utils/hash.py b/src/talos/contracts/gmx/utils/hash.py
deleted file mode 100644
index f380089a..00000000
--- a/src/talos/contracts/gmx/utils/hash.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from typing import Any
-
-from eth_abi.abi import encode
-from eth_hash.auto import keccak
-from eth_rpc.types import primitives
-
-
-def create_hash(data_type_list: list[str], data_value_list: list[Any]) -> primitives.bytes32:
- """
- Create a keccak hash using a list of strings corresponding to data types
- and a list of the values the data types match
-
- Parameters
- ----------
- data_type_list : list
- list of data types as strings.
- data_value_list : list
- list of values as strings.
-
- Returns
- -------
- bytes
- encoded hashed key .
-
- """
- byte_data = encode(data_type_list, data_value_list)
- return primitives.bytes32(keccak(byte_data))
-
-
-def create_hash_string(string: str) -> primitives.bytes32:
- """
- Value to hash
-
- Parameters
- ----------
- string : str
- string to hash.
-
- Returns
- -------
- bytes
- hashed string.
-
- """
- return create_hash(["string"], [string])
- return create_hash(["string"], [string])
diff --git a/src/talos/contracts/gmx/utils/keys.py b/src/talos/contracts/gmx/utils/keys.py
deleted file mode 100644
index 333ebfe3..00000000
--- a/src/talos/contracts/gmx/utils/keys.py
+++ /dev/null
@@ -1,131 +0,0 @@
-from eth_rpc.types import primitives
-
-from .hash import create_hash, create_hash_string
-
-ACCOUNT_POSITION_LIST = create_hash_string("ACCOUNT_POSITION_LIST")
-CLAIMABLE_FEE_AMOUNT = create_hash_string("CLAIMABLE_FEE_AMOUNT")
-DECREASE_ORDER_GAS_LIMIT = create_hash_string("DECREASE_ORDER_GAS_LIMIT")
-DEPOSIT_GAS_LIMIT = create_hash_string("DEPOSIT_GAS_LIMIT")
-
-WITHDRAWAL_GAS_LIMIT = create_hash_string("WITHDRAWAL_GAS_LIMIT")
-
-EXECUTION_GAS_FEE_BASE_AMOUNT = create_hash_string("EXECUTION_GAS_FEE_BASE_AMOUNT")
-EXECUTION_GAS_FEE_MULTIPLIER_FACTOR = create_hash_string("EXECUTION_GAS_FEE_MULTIPLIER_FACTOR")
-INCREASE_ORDER_GAS_LIMIT = create_hash_string("INCREASE_ORDER_GAS_LIMIT")
-MAX_OPEN_INTEREST = create_hash_string("MAX_OPEN_INTEREST")
-MAX_POSITION_IMPACT_FACTOR_FOR_LIQUIDATIONS_KEY = create_hash_string("MAX_POSITION_IMPACT_FACTOR_FOR_LIQUIDATIONS")
-MAX_PNL_FACTOR_FOR_TRADERS = create_hash_string("MAX_PNL_FACTOR_FOR_TRADERS")
-MAX_PNL_FACTOR_FOR_DEPOSITS = create_hash_string("MAX_PNL_FACTOR_FOR_DEPOSITS")
-MAX_PNL_FACTOR_FOR_WITHDRAWALS = create_hash_string("MAX_PNL_FACTOR_FOR_WITHDRAWALS")
-MIN_ADDITIONAL_GAS_FOR_EXECUTION = create_hash_string("MIN_ADDITIONAL_GAS_FOR_EXECUTION")
-MIN_COLLATERAL_USD = create_hash_string("MIN_COLLATERAL_USD")
-MIN_COLLATERAL_FACTOR_KEY = create_hash_string("MIN_COLLATERAL_FACTOR")
-MIN_POSITION_SIZE_USD = create_hash_string("MIN_POSITION_SIZE_USD")
-OPEN_INTEREST_IN_TOKENS = create_hash_string("OPEN_INTEREST_IN_TOKENS")
-OPEN_INTEREST = create_hash_string("OPEN_INTEREST")
-OPEN_INTEREST_RESERVE_FACTOR = create_hash_string("OPEN_INTEREST_RESERVE_FACTOR")
-POOL_AMOUNT = create_hash_string("POOL_AMOUNT")
-RESERVE_FACTOR = create_hash_string("RESERVE_FACTOR")
-SINGLE_SWAP_GAS_LIMIT = create_hash_string("SINGLE_SWAP_GAS_LIMIT")
-SWAP_ORDER_GAS_LIMIT = create_hash_string("SWAP_ORDER_GAS_LIMIT")
-VIRTUAL_TOKEN_ID = create_hash_string("VIRTUAL_TOKEN_ID")
-CLAIMABLE_FUNDING_AMOUNT = create_hash_string("CLAIMABLE_FUNDING_AMOUNT")
-
-def accountPositionListKey(account: str) -> primitives.bytes32:
- return create_hash(["bytes32", "address"], [ACCOUNT_POSITION_LIST, account])
-
-
-def claimable_fee_amount_key(market: str, token: str) -> primitives.bytes32:
- return create_hash(["bytes32", "address", "address"], [CLAIMABLE_FEE_AMOUNT, market, token])
-
-
-def claimable_funding_amount_key(market: str, token: str, account: str) -> primitives.bytes32:
- return create_hash(["bytes32", "address", "address", "address"], [CLAIMABLE_FUNDING_AMOUNT, market, token, account])
-
-
-def decrease_order_gas_limit_key() -> primitives.bytes32:
- return DECREASE_ORDER_GAS_LIMIT
-
-
-def deposit_gas_limit_key() -> primitives.bytes32:
- return DEPOSIT_GAS_LIMIT
-
-
-def execution_gas_fee_base_amount_key() -> primitives.bytes32:
- return EXECUTION_GAS_FEE_BASE_AMOUNT
-
-
-def execution_gas_fee_multiplier_key() -> primitives.bytes32:
- return EXECUTION_GAS_FEE_MULTIPLIER_FACTOR
-
-
-def increase_order_gas_limit_key() -> primitives.bytes32:
- return INCREASE_ORDER_GAS_LIMIT
-
-
-def min_additional_gas_for_execution_key() -> primitives.bytes32:
- return MIN_ADDITIONAL_GAS_FOR_EXECUTION
-
-
-def min_collateral() -> primitives.bytes32:
- return MIN_COLLATERAL_USD
-
-
-def min_collateral_factor_key(market: str) -> primitives.bytes32:
- return create_hash(["bytes32", "address"], [MIN_COLLATERAL_FACTOR_KEY, market])
-
-
-def max_open_interest_key(market: str, is_long: bool) -> primitives.bytes32:
- return create_hash(["bytes32", "address", "bool"], [MAX_OPEN_INTEREST, market, is_long])
-
-
-def max_position_impact_factor_for_liquidations_key(market: str) -> primitives.bytes32:
- return create_hash(["bytes32", "address"], [MAX_POSITION_IMPACT_FACTOR_FOR_LIQUIDATIONS_KEY, market])
-
-
-def open_interest_in_tokens_key(market: str, collateral_token: str, is_long: bool) -> primitives.bytes32:
- return create_hash(
- ["bytes32", "address", "address", "bool"], [OPEN_INTEREST_IN_TOKENS, market, collateral_token, is_long]
- )
-
-
-def open_interest_key(market: str, collateral_token: str, is_long: bool) -> primitives.bytes32:
- return create_hash(["bytes32", "address", "address", "bool"], [OPEN_INTEREST, market, collateral_token, is_long])
-
-
-def open_interest_reserve_factor_key(market: str, is_long: bool) -> primitives.bytes32:
- return create_hash(["bytes32", "address", "bool"], [OPEN_INTEREST_RESERVE_FACTOR, market, is_long])
-
-
-def pool_amount_key(market: str, token: str) -> primitives.bytes32:
- return create_hash(["bytes32", "address", "address"], [POOL_AMOUNT, market, token])
-
-
-def reserve_factor_key(market: str, is_long: bool) -> primitives.bytes32:
- return create_hash(["bytes32", "address", "bool"], [RESERVE_FACTOR, market, is_long])
-
-
-def single_swap_gas_limit_key() -> primitives.bytes32:
- return SINGLE_SWAP_GAS_LIMIT
-
-
-def swap_order_gas_limit_key() -> primitives.bytes32:
- return SWAP_ORDER_GAS_LIMIT
-
-
-def virtualTokenIdKey(token: str) -> primitives.bytes32:
- return create_hash(["bytes32", "address"], [VIRTUAL_TOKEN_ID, token])
-
-
-def withdraw_gas_limit_key() -> primitives.bytes32:
- return WITHDRAWAL_GAS_LIMIT
-
-
-if __name__ == "__main__":
- # market = '0x70d95587d40A2caf56bd97485aB3Eec10Bee6336'
- # token = '0x82aF49447D8a07e3bd95BD0d56f35241523fBab1'
- # token = '0xaf88d065e77c8cC2239327C5EDb3A432268e5831'
-
- token = "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"
-
- hash_data = virtualTokenIdKey(token)
diff --git a/src/talos/contracts/gmx/utils/math.py b/src/talos/contracts/gmx/utils/math.py
deleted file mode 100644
index 27e5c42c..00000000
--- a/src/talos/contracts/gmx/utils/math.py
+++ /dev/null
@@ -1,7 +0,0 @@
-def median(values: list[float]) -> float:
- values.sort()
- n = len(values)
- mid = n // 2
- if n % 2 == 0:
- return (values[mid - 1] + values[mid]) / 2
- return values[mid]
diff --git a/src/talos/contracts/gmx/utils/numerize.py b/src/talos/contracts/gmx/utils/numerize.py
deleted file mode 100644
index 3f7f0e29..00000000
--- a/src/talos/contracts/gmx/utils/numerize.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from decimal import Decimal
-
-
-def round_num(n, decimals):
- '''
- Params:
- n - number to round
- decimals - number of decimal places to round to
- Round number to 2 decimal places
- For example:
- 10.0 -> 10
- 10.222 -> 10.22
- '''
- return n.to_integral() if n == n.to_integral() else round(n.normalize(), decimals)
-
-
-def drop_zero(n):
- '''
- Drop trailing 0s
- For example:
- 10.100 -> 10.1
- '''
- n = str(n)
- return n.rstrip('0').rstrip('.') if '.' in n else n
-
-
-def numerize(n, decimals=2): # noqa: C901
- '''
- Params:
- n - number to be numerized
- decimals - number of decimal places to round to
- Converts numbers like:
- 1,000 -> 1K
- 1,000,000 -> 1M
- 1,000,000,000 -> 1B
- 1,000,000,000,000 -> 1T
- '''
- is_negative_string = ""
- if n < 0:
- is_negative_string = "-"
- n = abs(Decimal(n))
- if n < 1000:
- return is_negative_string + str(drop_zero(round_num(n, decimals)))
- elif n >= 1000 and n < 1000000:
- if n % 1000 == 0:
- return is_negative_string + str(int(n / 1000)) + "K"
- else:
- n = n / 1000
- return is_negative_string + str(drop_zero(round_num(n, decimals))) + "K"
- elif n >= 1000000 and n < 1000000000:
- if n % 1000000 == 0:
- return is_negative_string + str(int(n / 1000000)) + "M"
- else:
- n = n / 1000000
- return is_negative_string + str(drop_zero(round_num(n, decimals))) + "M"
- elif n >= 1000000000 and n < 1000000000000:
- if n % 1000000000 == 0:
- return is_negative_string + str(int(n / 1000000000)) + "B"
- else:
- n = n / 1000000000
- return is_negative_string + str(drop_zero(round_num(n, decimals))) + "B"
- elif n >= 1000000000000 and n < 1000000000000000:
- if n % 1000000000000 == 0:
- return is_negative_string + str(int(n / 1000000000000)) + "T"
- else:
- n = n / 1000000000000
- return is_negative_string + str(drop_zero(round_num(n, decimals))) + "T"
- else:
- return is_negative_string + str(n)
diff --git a/src/talos/contracts/gmx/utils/price.py b/src/talos/contracts/gmx/utils/price.py
deleted file mode 100644
index 09c91804..00000000
--- a/src/talos/contracts/gmx/utils/price.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from ..constants import PRECISION
-from ..contracts.synthetics_reader import ExecutionPriceParams, synthetics_reader
-
-
-async def get_execution_price_and_price_impact(
- params: ExecutionPriceParams,
- decimals: int,
-) -> dict[str, float]:
- """
- Get the execution price and price impact for a position
-
- Parameters
- ----------
- chain : str
- arbitrum or avalanche.
- params : dict
- dictionary of the position parameters.
- decimals : int
- number of decimals of the token being traded eg ETH == 18.
-
- """
- output = await synthetics_reader.get_execution_price(params).get()
-
- return {
- "execution_price": output.execution_price / 10 ** (PRECISION - decimals),
- "price_impact_usd": output.price_impact_usd / 10**PRECISION,
- }
diff --git a/src/talos/contracts/gmx/utils/swap.py b/src/talos/contracts/gmx/utils/swap.py
deleted file mode 100644
index 2ffa9dff..00000000
--- a/src/talos/contracts/gmx/utils/swap.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from typing import Any
-
-from eth_typing import ChecksumAddress
-
-from ..types import Market
-
-
-def find_dictionary_by_key_value(outer_dict: dict[str, Any], key: str, value: str) -> Any:
- """
- For a given dictionary, find a value which matches a set of keys
- """
- for inner_dict in outer_dict.values():
- if key in inner_dict and getattr(inner_dict, key) == value:
- return inner_dict
- return None
-
-
-def determine_swap_route(
- markets: dict[ChecksumAddress, Market], in_token: ChecksumAddress, out_token: ChecksumAddress
-) -> tuple[list[ChecksumAddress], bool]:
- """
- Using the available markets, find the list of GMX markets required
- to swap from token in to token out
-
- Parameters
- ----------
- markets : dict
- dictionary of markets output by getMarketInfo.
- in_token : str
- contract address of in token.
- out_token : str
- contract address of out token.
-
- Returns
- -------
- list
- list of GMX markets to swap through.
- is_requires_multi_swap : TYPE
- requires more than one market to pass thru.
-
- """
-
- if in_token == "0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f":
- in_token = "0x47904963fc8b2340414262125aF798B9655E58Cd"
-
- if out_token == "0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f":
- out_token = "0x47904963fc8b2340414262125aF798B9655E58Cd"
-
- if in_token == "0xaf88d065e77c8cC2239327C5EDb3A432268e5831":
- gmx_market_address = find_dictionary_by_key_value(markets, "index_token_address", out_token)[
- "gmx_market_address"
- ]
- else:
- gmx_market_address = find_dictionary_by_key_value(markets, "index_token_address", in_token)[
- "gmx_market_address"
- ]
-
- is_requires_multi_swap = False
-
- if (
- out_token != "0xaf88d065e77c8cC2239327C5EDb3A432268e5831"
- and in_token != "0xaf88d065e77c8cC2239327C5EDb3A432268e5831"
- ):
- is_requires_multi_swap = True
- second_gmx_market_address = find_dictionary_by_key_value(markets, "index_token_address", out_token)[
- "gmx_market_address"
- ]
-
- return [gmx_market_address, second_gmx_market_address], is_requires_multi_swap
-
- return [gmx_market_address], is_requires_multi_swap
diff --git a/src/talos/contracts/gmx/utils/tokens.py b/src/talos/contracts/gmx/utils/tokens.py
deleted file mode 100644
index 1a82704b..00000000
--- a/src/talos/contracts/gmx/utils/tokens.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import httpx
-from eth_rpc.utils import to_checksum
-from eth_typing import ChecksumAddress
-
-from ..types.tokens import TokenMetadata
-
-
-async def get_tokens_address_dict() -> dict[ChecksumAddress, TokenMetadata]:
- """
- Query the GMX infra api for to generate dictionary of tokens available on v2
-
- Parameters
- ----------
- chain : str
- avalanche of arbitrum.
-
- Returns
- -------
- token_address_dict : dict
- dictionary containing available tokens to trade on GMX.
-
- """
- url = "https://arbitrum-api.gmxinfra.io/tokens"
-
- try:
- async with httpx.AsyncClient() as client:
- response = await client.get(url)
-
- # Check if the request was successful (status code 200)
- if response.status_code == 200:
- # Parse the JSON response
- token_infos = response.json()["tokens"]
- else:
- print(f"Error: {response.status_code}")
- except httpx.RequestError as e:
- print(f"Error: {e}")
-
- token_address_dict = {}
-
- for token_info in token_infos:
- token_address_dict[to_checksum(token_info["address"])] = TokenMetadata(**token_info)
-
- return token_address_dict
-
-
-if __name__ == "__main__":
- import asyncio
-
- result = asyncio.run(get_tokens_address_dict())
- print(result)
diff --git a/src/talos/contracts/weth.py b/src/talos/contracts/weth.py
deleted file mode 100644
index 2c8a4d7f..00000000
--- a/src/talos/contracts/weth.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from typing import Annotated
-
-from eth_rpc import ContractFunc, ProtocolBase
-from eth_rpc.types import METHOD, Name, NoArgs, primitives
-
-
-class WETH(ProtocolBase):
- deposit: Annotated[ContractFunc[NoArgs, None], Name("deposit")] = METHOD
- withdraw: Annotated[ContractFunc[primitives.uint256, None], Name("withdraw")] = METHOD
diff --git a/src/talos/core/__init__.py b/src/talos/core/__init__.py
deleted file mode 100644
index 2f760c5d..00000000
--- a/src/talos/core/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from talos.core.agent import Agent
-from talos.core.main_agent import MainAgent
-from talos.core.memory import Memory
-
-__all__ = ["Agent", "MainAgent", "Memory"]
diff --git a/src/talos/core/agent.py b/src/talos/core/agent.py
deleted file mode 100644
index 0f4c9a26..00000000
--- a/src/talos/core/agent.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Optional, Union
-
-from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain_core.runnables import Runnable
-from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
-
-from talos.core.memory import Memory
-from talos.data.dataset_manager import DatasetManager
-from talos.hypervisor.supervisor import Supervisor
-from talos.prompts.prompt_manager import PromptManager
-from talos.tools.supervised_tool import SupervisedTool
-from talos.tools.tool_manager import ToolManager
-
-
-class Agent(BaseModel):
- model_config = ConfigDict(arbitrary_types_allowed=True)
- """
- Agent is a class that represents an agent that can interact with the user.
-
- Args:
- model_name: The name of the model to use.
- prompt_manager: The prompt manager to use.
- schema_class: The schema class to use for structured output.
- tool_manager: The tool manager to use.
- user_id: Optional user identifier for conversation tracking.
- session_id: Optional session identifier for conversation grouping.
- use_database_memory: Whether to use database-backed memory instead of files.
- """
-
- model: BaseChatModel | Runnable
- prompt_manager: PromptManager | None = Field(None, alias="prompt_manager")
- schema_class: type[BaseModel] | None = Field(None, alias="schema")
- tool_manager: ToolManager = Field(default_factory=ToolManager, alias="tool_manager")
- supervisor: Optional[Supervisor] = None
- is_main_agent: bool = False
- memory: Optional[Memory] = None
- dataset_manager: Optional[DatasetManager] = None
- user_id: Optional[str] = None
- session_id: Optional[str] = None
- use_database_memory: bool = False
- verbose: Union[bool, int] = False
-
- _prompt_template: ChatPromptTemplate = PrivateAttr()
- history: list[BaseMessage] = []
-
- def model_post_init(self, __context: Any) -> None:
- if self.memory:
- from talos.tools.memory_tool import AddMemoryTool
- self.tool_manager.register_tool(AddMemoryTool(agent=self))
-
- def set_prompt(self, name: str | list[str]):
- if not self.prompt_manager:
- raise ValueError("Prompt manager not initialized.")
-
- prompt_names = name if isinstance(name, list) else [name]
- if self.dataset_manager:
- prompt_names.append("relevant_documents_prompt")
- if self.memory:
- prompt_names.append("relevant_memories_prompt")
-
- prompt = self.prompt_manager.get_prompt(prompt_names)
- if not prompt:
- raise ValueError(f"The prompt '{prompt_names}' is not defined.")
- # Build a chat prompt that contains the system template and leaves a
- # placeholder for the ongoing conversation (`messages`).
- # This allows the user input and prior history to be provided to the
- # model at runtime so that responses can be contextual and not ignore
- # the latest message.
- self._prompt_template = ChatPromptTemplate.from_messages(
- [
- ("system", prompt.template),
- MessagesPlaceholder(variable_name="messages"),
- ]
- )
-
- def add_supervisor(self, supervisor: Supervisor):
- """
- Adds a supervisor to the agent.
- """
- self.supervisor = supervisor
-
- def add_to_history(self, messages: list[BaseMessage]):
- """
- Adds a list of messages to the history.
- """
- self.history.extend(messages)
-
- def reset_history(self):
- """
- Resets the history of the agent.
- """
- self.history = []
-
- def _build_context(self, query: str, **kwargs) -> dict:
- """
- A base method for adding context to the query.
- """
- context = {}
-
- if self.dataset_manager and query:
- relevant_documents = self.dataset_manager.search(query, k=5, context_search=True)
- context["relevant_documents"] = relevant_documents
-
- if "relevant_memories" in kwargs and kwargs["relevant_memories"]:
- context["relevant_memories"] = kwargs["relevant_memories"]
-
- return context
-
- def _get_verbose_level(self) -> int:
- """Convert verbose to integer level for backward compatibility."""
- if isinstance(self.verbose, bool):
- return 1 if self.verbose else 0
- return max(0, min(2, self.verbose))
-
- def run(self, message: str, history: list[BaseMessage] | None = None, **kwargs) -> BaseModel:
- if self.memory:
- relevant_memories = self.memory.search(message)
- kwargs["relevant_memories"] = relevant_memories
-
- verbose_level = self._get_verbose_level()
- if verbose_level >= 1 and relevant_memories:
- print(f"🧠 Found {len(relevant_memories)} relevant memories for context")
- if verbose_level >= 2:
- for i, memory in enumerate(relevant_memories[:3], 1):
- print(f" {i}. {memory.description}")
- if len(relevant_memories) > 3:
- print(f" ... and {len(relevant_memories) - 3} more")
-
- if history is None:
- history = self.memory.load_history()
-
- self._prepare_run(message, history)
- chain = self._create_chain()
- context = self._build_context(message, **kwargs)
- result = chain.invoke({"messages": self.history, **context, **kwargs})
- processed_result = self._process_result(result)
- if self.memory:
- self.memory.save_history(self.history)
- return processed_result
-
- def add_memory(self, description: str, metadata: Optional[dict] = None):
- if self.memory:
- self.memory.add_memory(description, metadata)
-
- def _prepare_run(self, message: str, history: list[BaseMessage] | None = None) -> None:
- if history:
- self.history.clear()
- self.history.extend(history)
- if self.prompt_manager:
- self.prompt_manager.update_prompt_template(self.history)
- self.history.append(HumanMessage(content=message))
- tools = self.tool_manager.get_all_tools()
- for tool in tools:
- if isinstance(tool, SupervisedTool):
- tool.set_supervisor(self.supervisor)
- if tools and isinstance(self.model, BaseChatModel):
- self.model = self.model.bind_tools(tools)
-
- def _create_chain(self) -> Runnable:
- if self.schema_class and isinstance(self.model, BaseChatModel):
- structured_llm = self.model.with_structured_output(self.schema_class)
- return self._prompt_template | structured_llm
- return self._prompt_template | self.model
-
- def _process_result(self, result: Any) -> BaseModel:
- if isinstance(result, AIMessage):
- self.history.append(result)
-
- if hasattr(result, 'tool_calls') and result.tool_calls:
- tool_messages = []
- for tool_call in result.tool_calls:
- try:
- tool = self.tool_manager.get_tool(tool_call['name'])
- if tool:
- tool_result = tool.invoke(tool_call['args'])
- if tool_call['name'] == 'dataset_search':
- display_result = str(tool_result)[:100] + "..." if len(str(tool_result)) > 100 else str(tool_result)
- else:
- display_result = str(tool_result)
- print(f"🔧 Executed tool '{tool_call['name']}': {display_result}", flush=True)
-
- tool_message = ToolMessage(
- content=str(tool_result),
- tool_call_id=tool_call['id']
- )
- tool_messages.append(tool_message)
- except Exception as e:
- print(f"❌ Tool execution error for '{tool_call['name']}': {e}", flush=True)
- tool_message = ToolMessage(
- content=f"Error: {str(e)}",
- tool_call_id=tool_call['id']
- )
- tool_messages.append(tool_message)
-
- self.history.extend(tool_messages)
-
- chain = self._create_chain()
- last_human_message = ""
- for message in reversed(self.history):
- if isinstance(message, HumanMessage):
- last_human_message = str(message.content)
- break
- context = self._build_context(last_human_message)
- new_result = chain.invoke({"messages": self.history, **context})
-
- return self._process_result(new_result)
-
- return result
- if isinstance(result, BaseModel):
- self.history.append(AIMessage(content=str(result)))
- return result
- if isinstance(result, dict) and self.schema_class:
- modelled_result = self.schema_class.parse_obj(result)
- self.history.append(AIMessage(content=str(modelled_result)))
- return modelled_result
- raise TypeError(f"Expected a Pydantic model or a dictionary, but got {type(result)}")
diff --git a/src/talos/core/extensible_agent.py b/src/talos/core/extensible_agent.py
deleted file mode 100644
index 02ce801e..00000000
--- a/src/talos/core/extensible_agent.py
+++ /dev/null
@@ -1,961 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
-from langchain_core.messages import BaseMessage
-from pydantic import BaseModel, ConfigDict
-
-from talos.core.memory import Memory
-from talos.dag.manager import DAGManager
-from talos.prompts.prompt_manager import PromptManager
-from talos.skills.base import Skill
-from talos.tools.tool_manager import ToolManager
-
-from talos.dag.dag_agent import DAGAgent
-if TYPE_CHECKING:
- from talos.dag.structured_nodes import NodeVersion
-
-
-class SupportAgent(BaseModel):
- """
- Specialized support agent with a specific architecture for handling domain tasks.
- Each support agent has predefined capabilities and delegation patterns.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- name: str
- domain: str # e.g., "governance", "security", "development"
- description: str
- architecture: Dict[str, Any] # Specific architecture definition
-
- skills: List[Skill] = []
- model: Optional[Any] = None
- memory: Optional[Memory] = None
- prompt_manager: Optional[PromptManager] = None
-
- delegation_keywords: List[str] = [] # Keywords that trigger this agent
- task_patterns: List[str] = [] # Task patterns this agent handles
-
- conversation_history: List[BaseMessage] = []
-
- def model_post_init(self, __context: Any) -> None:
- if not self.memory:
- self._setup_agent_memory()
- self._validate_architecture()
-
- def _setup_agent_memory(self) -> None:
- """Setup memory for this support agent."""
- from langchain_openai import OpenAIEmbeddings
- from pathlib import Path
-
- embeddings_model = OpenAIEmbeddings()
- memory_dir = Path("memory") / f"agent_{self.name}"
- memory_dir.mkdir(parents=True, exist_ok=True)
-
- self.memory = Memory(
- file_path=memory_dir / "memories.json",
- embeddings_model=embeddings_model,
- history_file_path=memory_dir / "history.json",
- use_database=False,
- auto_save=True,
- verbose=False,
- )
-
- def _validate_architecture(self) -> None:
- """Validate that the agent architecture is properly defined."""
- required_keys = ["task_flow", "decision_points", "capabilities"]
- for key in required_keys:
- if key not in self.architecture:
- self.architecture[key] = []
-
- def analyze_task(self, query: str, context: Dict[str, Any]) -> Dict[str, Any]:
- """
- Analyze the task using the agent's specific architecture.
- Determines the best approach based on the agent's capabilities.
- """
- model = self.model
- if not model:
- from langchain_openai import ChatOpenAI
- model = ChatOpenAI(model="gpt-4o-mini")
-
- analysis_prompt = f"""
- You are a specialized support agent for: {self.domain}
- Agent: {self.name}
- Description: {self.description}
-
- Architecture capabilities: {self.architecture.get('capabilities', [])}
- Task patterns you handle: {self.task_patterns}
-
- Analyze this task: {query}
- Context: {context}
-
- Based on your architecture, determine:
- 1. Can you handle this task? (yes/no)
- 2. What approach should you take?
- 3. What information do you need?
- 4. Which of your skills should be used?
-
- Respond with a JSON object:
- {{
- "can_handle": true/false,
- "approach": "description of approach",
- "required_info": ["info1", "info2"],
- "recommended_skills": ["skill1", "skill2"],
- "confidence": 0.0-1.0
- }}
- """
-
- try:
- from langchain_core.messages import HumanMessage
- response = model.invoke([HumanMessage(content=analysis_prompt)])
-
- enhanced_context = context.copy()
- enhanced_context["agent_analysis"] = response.content
- enhanced_context["agent_name"] = self.name
- enhanced_context["agent_domain"] = self.domain
-
- return enhanced_context
-
- except Exception:
- return context
-
- def execute_task(self, context: Dict[str, Any]) -> Any:
- """Execute the task using the agent's architecture and skills."""
- if self.memory:
- memory_context = self.memory.search(context.get("current_query", ""), k=3)
- context["agent_memory"] = memory_context
-
- task_flow = self.architecture.get("task_flow", ["analyze", "execute"])
- results = {}
-
- for step in task_flow:
- if step == "analyze":
- results["analysis"] = self.analyze_task(
- context.get("current_query", ""), context
- )
- elif step == "execute" and self.skills:
- skill = self.skills[0] # For now, use first skill
- results["execution"] = skill.run(**context)
-
- if self.memory:
- self.memory.add_memory(
- f"Agent {self.name} executed task: {str(results)[:200]}",
- {"agent": self.name, "domain": self.domain, "context": context}
- )
-
- return results
-
-
-class SupportAgentRegistry(BaseModel):
- """Registry for managing specialized support agents with structured delegation rules."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- _agents: Dict[str, SupportAgent] = {}
- _delegation_map: Dict[str, str] = {} # keyword -> agent_name
-
- def register_agent(self, agent: SupportAgent) -> None:
- """Register a new support agent and update delegation rules."""
- self._agents[agent.name] = agent
-
- for keyword in agent.delegation_keywords:
- self._delegation_map[keyword.lower()] = agent.name
-
- def unregister_agent(self, agent_name: str) -> bool:
- """Remove a support agent and clean up delegation rules."""
- if agent_name in self._agents:
- keywords_to_remove = [k for k, v in self._delegation_map.items() if v == agent_name]
- for keyword in keywords_to_remove:
- del self._delegation_map[keyword]
-
- del self._agents[agent_name]
- return True
- return False
-
- def get_agent(self, agent_name: str) -> Optional[SupportAgent]:
- """Get a support agent by name."""
- return self._agents.get(agent_name)
-
- def find_agent_for_task(self, query: str) -> Optional[SupportAgent]:
- """Find the best support agent for a given task based on delegation rules."""
- query_lower = query.lower()
-
- for keyword, agent_name in self._delegation_map.items():
- if keyword in query_lower:
- return self._agents.get(agent_name)
-
- return None
-
- def list_agents(self) -> List[str]:
- """List all registered agent names."""
- return list(self._agents.keys())
-
- def get_all_agents(self) -> Dict[str, SupportAgent]:
- """Get all registered support agents."""
- return self._agents.copy()
-
- def get_delegation_rules(self) -> Dict[str, str]:
- """Get current delegation rules."""
- return self._delegation_map.copy()
-
-
-class DelegatingMainAgent(DAGAgent):
- """
- Main agent that delegates tasks to specialized support agents with structured DAG structure.
- Each support agent has a specific architecture for handling their domain of tasks.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- support_agents: Dict[str, "SupportAgent"] = {}
- delegation_rules: Dict[str, str] = {} # keyword -> agent mapping
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- self._setup_support_agents()
- self._build_structured_dag()
-
- def _setup_support_agents(self) -> None:
- """Setup specialized support agents with structured architectures."""
- if self.prompt_manager:
- from talos.skills.proposals import ProposalsSkill
-
- governance_agent = SupportAgent(
- name="governance_agent",
- domain="governance",
- description="Specialized agent for governance proposals and DAO operations",
- architecture={
- "task_flow": ["analyze", "research", "evaluate", "execute"],
- "decision_points": ["proposal_type", "complexity", "stakeholders"],
- "capabilities": ["proposal_analysis", "voting_guidance", "governance_research"]
- },
- skills=[ProposalsSkill(llm=self.model, prompt_manager=self.prompt_manager)], # type: ignore
- delegation_keywords=["proposal", "governance", "voting", "dao"],
- task_patterns=["analyze proposal", "evaluate governance", "voting recommendation"]
- )
- self.support_agents["governance"] = governance_agent
-
- from talos.skills.cryptography import CryptographySkill
-
- security_agent = SupportAgent(
- name="security_agent",
- domain="security",
- description="Specialized agent for cryptography and security operations",
- architecture={
- "task_flow": ["validate", "encrypt", "secure"],
- "decision_points": ["security_level", "encryption_type", "key_management"],
- "capabilities": ["encryption", "decryption", "key_generation", "security_audit"]
- },
- skills=[CryptographySkill()],
- delegation_keywords=["encrypt", "decrypt", "security", "crypto", "key"],
- task_patterns=["encrypt data", "decrypt message", "security analysis"]
- )
- self.support_agents["security"] = security_agent
-
- self._setup_optional_agents()
-
- def _setup_optional_agents(self) -> None:
- """Setup optional support agents that depend on external services."""
- try:
- from talos.skills.twitter_sentiment import TwitterSentimentSkill
- from talos.tools.twitter_client import TwitterConfig
-
- TwitterConfig() # Check if Twitter token is available
-
- if self.prompt_manager:
- social_agent = SupportAgent(
- name="social_agent",
- domain="social_media",
- description="Specialized agent for social media analysis and sentiment",
- architecture={
- "task_flow": ["collect", "analyze", "sentiment", "report"],
- "decision_points": ["platform", "sentiment_type", "analysis_depth"],
- "capabilities": ["sentiment_analysis", "trend_detection", "social_monitoring"]
- },
- skills=[TwitterSentimentSkill(prompt_manager=self.prompt_manager)],
- delegation_keywords=["twitter", "sentiment", "social", "trend"],
- task_patterns=["analyze sentiment", "social media analysis", "trend detection"]
- )
- self.support_agents["social"] = social_agent
-
- except (ImportError, ValueError):
- pass # Twitter dependencies not available
-
- try:
- from talos.skills.pr_review import PRReviewSkill
- from talos.tools.github.tools import GithubTools
- from talos.settings import GitHubSettings
-
- github_settings = GitHubSettings()
- if github_settings.GITHUB_API_TOKEN and self.prompt_manager:
- dev_agent = SupportAgent(
- name="development_agent",
- domain="development",
- description="Specialized agent for code review and development tasks",
- architecture={
- "task_flow": ["analyze_code", "review", "suggest", "validate"],
- "decision_points": ["code_quality", "security_issues", "best_practices"],
- "capabilities": ["code_review", "pr_analysis", "security_scan", "quality_check"]
- },
- skills=[PRReviewSkill(
- llm=self.model, # type: ignore
- prompt_manager=self.prompt_manager,
- github_tools=GithubTools(token=github_settings.GITHUB_API_TOKEN)
- )],
- delegation_keywords=["github", "pr", "review", "code", "development"],
- task_patterns=["review pull request", "analyze code", "development task"]
- )
- self.support_agents["development"] = dev_agent
-
- except (ImportError, ValueError):
- pass # GitHub dependencies not available
-
- def _build_structured_dag(self) -> None:
- """Build a structured DAG structure with predefined delegation patterns."""
- self.delegation_rules = {}
- for agent in self.support_agents.values():
- for keyword in agent.delegation_keywords:
- self.delegation_rules[keyword.lower()] = agent.name
-
- self._rebuild_dag()
-
- def delegate_task(self, query: str, context: Optional[Dict[str, Any]] = None) -> Any:
- """
- Delegate a task to the appropriate support agent based on structured rules.
-
- Args:
- query: The task query
- context: Additional context for the task
-
- Returns:
- Result from the delegated agent or main agent
- """
- if context is None:
- context = {}
-
- query_lower = query.lower()
- delegated_agent = None
-
- for keyword, agent_name in self.delegation_rules.items():
- if keyword in query_lower:
- delegated_agent = self.support_agents.get(agent_name)
- break
-
- if delegated_agent:
- context["current_query"] = query
- context["delegated_from"] = "main_agent"
-
- enhanced_context = delegated_agent.analyze_task(query, context)
- return delegated_agent.execute_task(enhanced_context)
- else:
- return self._handle_main_agent_task(query, context)
-
- def _handle_main_agent_task(self, query: str, context: Dict[str, Any]) -> Any:
- """Handle tasks that don't match any support agent patterns."""
- context["current_query"] = query
- context["handled_by"] = "main_agent"
-
- return f"Main agent handling: {query}"
-
- def add_support_agent(
- self,
- name: str,
- domain: str,
- description: str,
- architecture: Dict[str, Any],
- skills: List[Skill],
- delegation_keywords: List[str],
- task_patterns: List[str],
- model: Optional[Any] = None
- ) -> SupportAgent:
- """
- Add a new support agent with specific architecture.
-
- Args:
- name: Unique name for the support agent
- domain: Domain of expertise
- description: Description of capabilities
- architecture: Specific architecture definition
- skills: List of skills this agent can use
- delegation_keywords: Keywords that trigger this agent
- task_patterns: Task patterns this agent handles
- model: Optional individual LLM for this agent
-
- Returns:
- The created SupportAgent instance
- """
- agent = SupportAgent(
- name=name,
- domain=domain,
- description=description,
- architecture=architecture,
- skills=skills,
- delegation_keywords=delegation_keywords,
- task_patterns=task_patterns,
- model=model or self.model
- )
-
- self.support_agents[domain] = agent
- self._build_structured_dag()
-
- return agent
-
- def remove_support_agent(self, domain: str) -> bool:
- """
- Remove a support agent from the system.
-
- Args:
- domain: Domain of the agent to remove
-
- Returns:
- True if agent was removed, False if not found
- """
- if domain in self.support_agents:
- agent = self.support_agents[domain]
-
- keywords_to_remove = [k for k, v in self.delegation_rules.items() if v == agent.name]
- for keyword in keywords_to_remove:
- del self.delegation_rules[keyword]
-
- del self.support_agents[domain]
- self._build_structured_dag()
- return True
- return False
-
- def list_support_agents(self) -> List[str]:
- """List all available support agents."""
- return list(self.support_agents.keys())
-
- def get_support_agent(self, domain: str) -> Optional[SupportAgent]:
- """Get a specific support agent by domain."""
- return self.support_agents.get(domain)
-
- def _rebuild_dag(self) -> None:
- """Rebuild the structured DAG with current support agents."""
- if not self.dag_manager:
- self.dag_manager = DAGManager()
-
- skills = []
- for agent in self.support_agents.values():
- skills.extend(agent.skills)
-
- services: list[Any] = [] # Services can be added similarly
-
- from talos.tools.tool_manager import ToolManager
- tool_manager = ToolManager()
-
- self.setup_dag(
- skills=skills,
- services=services,
- tool_manager=tool_manager,
- dataset_manager=self.dataset_manager
- )
-
- def run(self, message: str, history: list[BaseMessage] | None = None, **kwargs) -> BaseModel:
- """
- Execute query using delegation-based approach.
-
- Args:
- message: The query message
- history: Optional conversation history
- **kwargs: Additional context
-
- Returns:
- Result from execution
- """
- context = kwargs.copy()
- if history:
- context["history"] = history
-
- result = self.delegate_task(message, context)
-
- if not isinstance(result, BaseModel):
- from pydantic import BaseModel as PydanticBaseModel
-
- class TaskResult(PydanticBaseModel):
- result: Any
- delegated_to: str = "unknown"
-
- return TaskResult(result=result)
-
- return result
-
- def get_delegation_status(self) -> Dict[str, Any]:
- """Get status information about the delegation framework."""
- agents_info = {}
- for domain, agent in self.support_agents.items():
- agents_info[domain] = {
- "name": agent.name,
- "description": agent.description,
- "architecture": agent.architecture,
- "delegation_keywords": agent.delegation_keywords,
- "task_patterns": agent.task_patterns,
- "skills_count": len(agent.skills),
- "has_individual_model": agent.model is not None
- }
-
- return {
- "total_agents": len(self.support_agents),
- "delegation_rules": self.delegation_rules,
- "support_agents": agents_info,
- "dag_available": self.dag_manager is not None,
- "architecture_type": "structured_delegation"
- }
-
-
-class StructuredMainAgent(DAGAgent):
- """
- Main agent with structured DAG architecture for blockchain-native node upgrades.
-
- This class represents the core of a blockchain-native AI system that enables
- individual component upgrades while maintaining deterministic behavior and
- system integrity. It orchestrates a network of specialized support agents
- through a structured DAG architecture.
-
- Blockchain-Native Architecture:
- The agent is designed from the ground up for blockchain compatibility:
- - Deterministic execution paths ensure reproducible results
- - Individual node upgrades enable granular system evolution
- - Hash-based verification prevents tampering and ensures integrity
- - Serializable state enables on-chain storage and verification
-
- Key Features:
- - Structured DAG with versioned nodes for controlled upgrades
- - Deterministic delegation patterns using hash-based routing
- - Individual support agent upgrade capabilities
- - Blockchain-compatible serialization and state management
- - Comprehensive upgrade validation and rollback support
-
- Support Agent Architecture:
- Each support agent represents a specialized capability with:
- - Unique domain expertise (governance, analytics, research, etc.)
- - Individual versioning and upgrade policies
- - Specific task patterns and delegation keywords
- - Custom architectures for handling domain-specific tasks
-
- Upgrade Methodology:
- The system supports three types of upgrades:
- 1. Individual node upgrades with version validation
- 2. Controlled rollbacks to previous versions
- 3. DAG-wide configuration updates with integrity checks
-
- Deterministic Delegation:
- Task routing uses deterministic patterns:
- - Keyword-based matching with sorted rule evaluation
- - Hash-based verification of delegation rules
- - Reproducible routing decisions across environments
- - Fallback mechanisms for unmatched queries
-
- Attributes:
- support_agents: Registry of available support agents
- structured_dag_manager: Manager for DAG operations and upgrades
-
- Examples:
- >>> agent = StructuredMainAgent(
- ... model=ChatOpenAI(model="gpt-4"),
- ... prompts_dir="/path/to/prompts",
- ... verbose=True
- ... )
- >>>
- >>> # Delegate a governance task
- >>> result = agent.delegate_task("Analyze governance proposal for voting")
- >>>
- >>> # Upgrade a specific node
- >>> success = agent.upgrade_support_agent(
- ... "governance",
- ... enhanced_agent,
- ... NodeVersion(1, 1, 0)
- ... )
- >>>
- >>> # Export for blockchain storage
- >>> blockchain_data = agent.export_for_blockchain()
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- support_agents: Dict[str, SupportAgent] = {}
- structured_dag_manager: Optional[Any] = None
- delegation_hash: str = ""
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- self._setup_structured_support_agents()
- self._build_structured_dag()
-
- def _setup_structured_support_agents(self) -> None:
- """
- Setup default support agents for the structured DAG.
-
- This method initializes the core set of support agents that provide
- specialized capabilities for the AI system. Each agent is configured
- with specific domain expertise, task patterns, and delegation keywords.
-
- Default Support Agents:
- - Governance: Handles proposals, voting, and DAO operations
- - Analytics: Processes data analysis and reporting tasks
-
- Each agent is initialized with:
- - Semantic versioning starting at 1.0.0
- - Compatible upgrade policy for safe evolution
- - Domain-specific task patterns and keywords
- - Specialized architecture for handling their domain
-
- The setup ensures deterministic agent ordering and consistent
- initialization across different execution environments.
- """
- governance_agent = SupportAgent(
- name="governance",
- domain="governance",
- description="Structured governance agent for blockchain proposals",
- architecture={
- "task_flow": ["validate", "analyze", "execute", "confirm"],
- "decision_points": ["proposal_validity", "consensus_mechanism", "execution_safety"],
- "capabilities": ["proposal_validation", "consensus_coordination", "safe_execution"]
- },
- delegation_keywords=["governance", "proposal", "vote", "consensus"],
- task_patterns=["validate proposal", "coordinate consensus", "execute governance"]
- )
-
- analytics_agent = SupportAgent(
- name="analytics",
- domain="analytics",
- description="Structured analytics agent for data processing",
- architecture={
- "task_flow": ["collect", "process", "analyze", "report"],
- "decision_points": ["data_source", "analysis_method", "output_format"],
- "capabilities": ["data_collection", "statistical_analysis", "report_generation"]
- },
- delegation_keywords=["analytics", "data", "analysis", "report"],
- task_patterns=["analyze data", "generate report", "process metrics"]
- )
-
- self.support_agents = {
- "governance": governance_agent,
- "analytics": analytics_agent
- }
-
- def _build_structured_dag(self) -> None:
- """
- Build the structured DAG with current support agents.
-
- This method constructs the blockchain-native DAG architecture:
- 1. Creates StructuredDAGManager for controlled operations
- 2. Builds DAG with deterministic node ordering
- 3. Establishes routing and delegation patterns
- 4. Validates DAG structure and integrity
-
- The resulting DAG provides:
- - Deterministic execution paths for reproducible results
- - Individual node upgrade capabilities
- - Hash-based verification of structure integrity
- - Blockchain-compatible serialization format
-
- DAG Structure:
- - Router node for task delegation
- - Individual support agent nodes with versioning
- - Shared prompt and data source nodes
- - Deterministic edge connections
-
- Raises:
- ValueError: If DAG construction fails or validation errors occur
- """
- if not self.prompt_manager:
- return
-
- from talos.dag.structured_manager import StructuredDAGManager
-
- services = []
- if hasattr(self, 'services'):
- services = self.services
-
- tool_manager = ToolManager()
- if hasattr(self, 'tool_manager'):
- tool_manager = self.tool_manager
-
- self.structured_dag_manager = StructuredDAGManager()
-
- try:
- self.dag = self.structured_dag_manager.create_structured_dag(
- model=self.model, # type: ignore
- prompt_manager=self.prompt_manager,
- support_agents=self.support_agents,
- services=services,
- tool_manager=tool_manager,
- dataset_manager=getattr(self, 'dataset_manager', None),
- dag_name="structured_blockchain_dag"
- )
- except Exception as e:
- print(f"Warning: Could not build structured DAG: {e}")
-
- def upgrade_support_agent(
- self,
- domain: str,
- new_agent: SupportAgent,
- new_version: "NodeVersion",
- force: bool = False
- ) -> bool:
- """
- Upgrade a specific support agent with comprehensive validation.
-
- This method enables individual component upgrades in the blockchain-native
- AI system. It performs controlled upgrades while maintaining system integrity
- and deterministic behavior.
-
- Upgrade Process:
- 1. Validates the target domain exists and is upgradeable
- 2. Checks version compatibility against current upgrade policy
- 3. Performs the upgrade through the structured DAG manager
- 4. Updates support agent registry with new configuration
- 5. Rebuilds DAG structure with updated node
-
- Safety Measures:
- - Version compatibility validation prevents breaking changes
- - Upgrade policies enforce safe transition paths
- - Rollback capability preserved for recovery
- - DAG integrity maintained throughout process
-
- Args:
- domain: Domain identifier of the support agent to upgrade
- new_agent: Updated support agent configuration
- new_version: Target version for the upgrade
- force: Whether to bypass version compatibility checks
-
- Returns:
- True if upgrade succeeded, False if validation failed
-
- Examples:
- >>> enhanced_agent = SupportAgent(
- ... name="governance_v2",
- ... domain="governance",
- ... description="Enhanced governance with new features",
- ... # ... additional configuration
- ... )
- >>> success = agent.upgrade_support_agent(
- ... "governance",
- ... enhanced_agent,
- ... NodeVersion(1, 1, 0)
- ... )
- >>> if success:
- ... print("Governance agent upgraded successfully")
- """
- if not self.structured_dag_manager:
- return False
-
- success = self.structured_dag_manager.upgrade_node(domain, new_agent, new_version, force)
- if success:
- self.support_agents[domain] = new_agent
-
- return success
-
- def validate_upgrade(self, domain: str, new_version: "NodeVersion") -> Dict[str, Any]:
- """
- Validate if a support agent can be upgraded to the specified version.
-
- This method provides comprehensive upgrade validation before attempting
- actual upgrades. It helps prevent incompatible changes and ensures
- safe evolution of the AI system.
-
- Validation Checks:
- - Domain existence and upgrade capability
- - Version compatibility against upgrade policy
- - Semantic versioning rules enforcement
- - Breaking change detection
-
- Args:
- domain: Domain identifier of the support agent
- new_version: Proposed version for upgrade validation
-
- Returns:
- Dictionary containing detailed validation results:
- - "valid": Boolean indicating if upgrade is allowed
- - "reason": Detailed explanation of validation result
- - "current_version": Current version of the support agent
- - "upgrade_policy": Current upgrade policy in effect
- - "target_version": Proposed target version
-
- Examples:
- >>> result = agent.validate_upgrade("governance", NodeVersion(2, 0, 0))
- >>> if not result["valid"]:
- ... print(f"Upgrade blocked: {result['reason']}")
- >>> else:
- ... print("Upgrade validation passed")
- """
- if not self.structured_dag_manager:
- return {"valid": False, "reason": "No structured DAG manager"}
-
- return self.structured_dag_manager.validate_upgrade(domain, new_version)
-
- def rollback_node(self, domain: str, target_version: "NodeVersion") -> bool:
- """
- Rollback a support agent to a previous version.
-
- This method enables controlled rollback of individual components
- when issues are discovered after upgrades. It maintains system
- stability by allowing quick recovery to known-good states.
-
- Rollback Process:
- 1. Validates the target domain and version
- 2. Ensures target version is older than current version
- 3. Performs rollback through structured DAG manager
- 4. Updates support agent registry
- 5. Rebuilds DAG with rolled-back configuration
-
- Safety Measures:
- - Only allows rollback to older versions
- - Preserves DAG structural integrity
- - Maintains deterministic behavior
- - Updates all relevant hashes and metadata
-
- Args:
- domain: Domain identifier of the support agent
- target_version: Previous version to rollback to
-
- Returns:
- True if rollback succeeded, False if validation failed
-
- Examples:
- >>> success = agent.rollback_node("governance", NodeVersion(1, 0, 0))
- >>> if success:
- ... print("Governance agent rolled back successfully")
- """
- if not self.structured_dag_manager:
- return False
-
- return self.structured_dag_manager.rollback_node(domain, target_version)
-
- def get_node_status(self, domain: str) -> Dict[str, Any]:
- """
- Get detailed status of a specific support agent node.
-
- This method provides comprehensive information about individual
- support agents, including their current configuration, version
- status, and upgrade capabilities.
-
- Status Information:
- - Current version and upgrade policy
- - Node hash for blockchain verification
- - Delegation keywords and task patterns
- - Architecture configuration
- - Upgrade compatibility status
-
- Args:
- domain: Domain identifier of the support agent
-
- Returns:
- Dictionary containing detailed node status:
- - "version": Current semantic version
- - "upgrade_policy": Current upgrade policy
- - "node_hash": Blockchain verification hash
- - "delegation_keywords": Keywords for task routing
- - "task_patterns": Supported task patterns
- - "architecture": Agent architecture configuration
- - "error": Error message if node not found
-
- Examples:
- >>> status = agent.get_node_status("governance")
- >>> print(f"Governance agent v{status['version']}")
- >>> print(f"Upgrade policy: {status['upgrade_policy']}")
- """
- if not self.structured_dag_manager or domain not in self.structured_dag_manager.node_registry:
- return {"error": "Node not found"}
-
- node = self.structured_dag_manager.node_registry[domain]
- return {
- "node_id": node.node_id,
- "version": str(node.node_version),
- "domain": node.support_agent.domain,
- "architecture": node.support_agent.architecture,
- "delegation_keywords": node.support_agent.delegation_keywords,
- "upgrade_policy": node.upgrade_policy,
- "node_hash": node.node_hash
- }
-
- def get_structured_status(self) -> Dict[str, Any]:
- """
- Get comprehensive status of the structured DAG and all components.
-
- This method provides a complete overview of the blockchain-native
- AI system, including DAG structure, node status, and blockchain
- readiness indicators.
-
- Comprehensive Status:
- - DAG metadata (name, version, node count)
- - Individual node status and versions
- - Delegation hash and routing configuration
- - Edge and conditional edge mappings
- - Blockchain compatibility indicators
-
- Returns:
- Dictionary containing complete system status:
- - "dag_name": Name of the current DAG
- - "dag_version": Current DAG version
- - "total_nodes": Number of nodes in the DAG
- - "structured_nodes": Detailed information for each node
- - "delegation_hash": Current delegation hash
- - "blockchain_ready": Blockchain compatibility status
- - "edges": DAG edge configuration
- - "conditional_edges": Conditional routing rules
-
- Examples:
- >>> status = agent.get_structured_status()
- >>> print(f"System has {status['total_nodes']} nodes")
- >>> print(f"Blockchain ready: {status['blockchain_ready']}")
- >>> for node_id, info in status['structured_nodes'].items():
- ... print(f"{node_id}: v{info['version']}")
- """
- if not self.structured_dag_manager:
- return {"status": "No structured DAG manager"}
-
- return self.structured_dag_manager.get_structured_dag_status()
-
- def export_for_blockchain(self) -> Dict[str, Any]:
- """
- Export DAG configuration for blockchain storage.
-
- This method produces a deterministic, serializable representation
- of the entire DAG structure suitable for on-chain storage and
- verification. The export includes all node configurations,
- delegation rules, and integrity hashes.
-
- Returns:
- Dictionary containing blockchain-ready DAG configuration
- """
- if not self.structured_dag_manager:
- return {}
-
- return self.structured_dag_manager.export_for_blockchain()
-
- def delegate_task(self, query: str, context: Optional[Dict[str, Any]] = None) -> Any:
- """
- Delegate task using structured DAG execution.
-
- This method routes tasks through the structured DAG architecture,
- enabling deterministic delegation to appropriate support agents
- based on the configured routing rules.
-
- Args:
- query: Task query to be delegated
- context: Optional context for task execution
-
- Returns:
- Results from DAG execution or error message
- """
- if self.dag:
- try:
- from talos.dag.nodes import GraphState
- from langchain_core.messages import HumanMessage
-
- initial_state: GraphState = {
- "messages": [HumanMessage(content=query)],
- "context": context or {},
- "current_query": query,
- "results": {},
- "metadata": {}
- }
-
- result = self.dag.execute(initial_state)
- return result.get("results", {})
- except Exception as e:
- return f"DAG execution failed: {e}"
-
- return f"Structured main agent handling: {query}"
diff --git a/src/talos/core/job_scheduler.py b/src/talos/core/job_scheduler.py
deleted file mode 100644
index 78dcd8f6..00000000
--- a/src/talos/core/job_scheduler.py
+++ /dev/null
@@ -1,223 +0,0 @@
-from __future__ import annotations
-
-import logging
-from typing import Any, Dict, List, Optional
-
-from apscheduler.schedulers.asyncio import AsyncIOScheduler
-from apscheduler.triggers.cron import CronTrigger
-from apscheduler.triggers.date import DateTrigger
-from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
-
-from talos.core.scheduled_job import ScheduledJob
-from talos.hypervisor.supervisor import Supervisor
-
-logger = logging.getLogger(__name__)
-
-
-class JobScheduler(BaseModel):
- """
- Manages scheduled jobs for the MainAgent using APScheduler.
-
- Provides functionality to:
- - Register and manage scheduled jobs
- - Execute jobs with supervision
- - Handle job lifecycle (start, stop, pause, resume)
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- supervisor: Optional[Supervisor] = Field(default=None, description="Supervisor for approving job executions")
- timezone: str = Field("UTC", description="Timezone for job scheduling")
-
- _scheduler: AsyncIOScheduler = PrivateAttr()
- _jobs: Dict[str, ScheduledJob] = PrivateAttr(default_factory=dict)
- _running: bool = PrivateAttr(default=False)
-
- def model_post_init(self, __context: Any) -> None:
- self._scheduler = AsyncIOScheduler(timezone=self.timezone)
- self._jobs = {}
- self._running = False
-
- def register_job(self, job: ScheduledJob) -> None:
- """
- Register a scheduled job with the scheduler.
-
- Args:
- job: The ScheduledJob instance to register
- """
- if job.name in self._jobs:
- logger.warning(f"Job '{job.name}' already registered, replacing existing job")
- self.unregister_job(job.name)
-
- self._jobs[job.name] = job
-
- if not job.enabled:
- logger.info(f"Job '{job.name}' registered but disabled")
- return
-
- if job.is_recurring() and job.cron_expression:
- trigger = CronTrigger.from_crontab(job.cron_expression, timezone=self.timezone)
- self._scheduler.add_job(
- func=self._execute_job_with_supervision,
- trigger=trigger,
- args=[job.name],
- id=job.name,
- max_instances=job.max_instances,
- replace_existing=True,
- )
- logger.info(f"Registered recurring job '{job.name}' with cron: {job.cron_expression}")
-
- elif job.is_one_time() and job.execute_at:
- trigger = DateTrigger(run_date=job.execute_at, timezone=self.timezone)
- self._scheduler.add_job(
- func=self._execute_job_with_supervision,
- trigger=trigger,
- args=[job.name],
- id=job.name,
- max_instances=job.max_instances,
- replace_existing=True,
- )
- logger.info(f"Registered one-time job '{job.name}' for: {job.execute_at}")
-
- def unregister_job(self, job_name: str) -> bool:
- """
- Unregister a scheduled job.
-
- Args:
- job_name: Name of the job to unregister
-
- Returns:
- True if job was found and removed, False otherwise
- """
- if job_name not in self._jobs:
- logger.warning(f"Job '{job_name}' not found for unregistration")
- return False
-
- try:
- self._scheduler.remove_job(job_name)
- except Exception as e:
- logger.warning(f"Failed to remove job '{job_name}' from scheduler: {e}")
-
- del self._jobs[job_name]
- logger.info(f"Unregistered job '{job_name}'")
- return True
-
- def get_job(self, job_name: str) -> Optional[ScheduledJob]:
- """Get a registered job by name."""
- return self._jobs.get(job_name)
-
- def list_jobs(self) -> List[ScheduledJob]:
- """Get all registered jobs."""
- return list(self._jobs.values())
-
- def start(self) -> None:
- """Start the job scheduler."""
- if self._running:
- logger.warning("Job scheduler is already running")
- return
-
- try:
- self._scheduler.start()
- self._running = True
- logger.info("Job scheduler started")
- except RuntimeError as e:
- if "no current event loop" in str(e).lower():
- logger.warning(f"No event loop available for job scheduler: {e}")
- logger.info("Job scheduler will remain inactive (suitable for testing)")
- else:
- logger.error(f"Failed to start job scheduler: {e}")
- raise
-
- def stop(self) -> None:
- """Stop the job scheduler."""
- if not self._running:
- logger.warning("Job scheduler is not running")
- return
-
- self._scheduler.shutdown()
- self._running = False
- logger.info("Job scheduler stopped")
-
- def pause_job(self, job_name: str) -> bool:
- """
- Pause a specific job.
-
- Args:
- job_name: Name of the job to pause
-
- Returns:
- True if job was found and paused, False otherwise
- """
- if not self._running:
- logger.warning("Job scheduler is not running, cannot pause job")
- return False
-
- try:
- self._scheduler.pause_job(job_name)
- logger.info(f"Paused job '{job_name}'")
- return True
- except Exception as e:
- logger.error(f"Failed to pause job '{job_name}': {e}")
- return False
-
- def resume_job(self, job_name: str) -> bool:
- """
- Resume a specific job.
-
- Args:
- job_name: Name of the job to resume
-
- Returns:
- True if job was found and resumed, False otherwise
- """
- if not self._running:
- logger.warning("Job scheduler is not running, cannot resume job")
- return False
-
- try:
- self._scheduler.resume_job(job_name)
- logger.info(f"Resumed job '{job_name}'")
- return True
- except Exception as e:
- logger.error(f"Failed to resume job '{job_name}': {e}")
- return False
-
- def is_running(self) -> bool:
- """Check if the scheduler is running."""
- return self._running
-
- async def _execute_job_with_supervision(self, job_name: str) -> None:
- """
- Execute a job with optional supervision.
-
- Args:
- job_name: Name of the job to execute
- """
- job = self._jobs.get(job_name)
- if not job:
- logger.error(f"Job '{job_name}' not found for execution")
- return
-
- if not job.enabled:
- logger.info(f"Job '{job_name}' is disabled, skipping execution")
- return
-
- logger.info(f"Executing scheduled job: {job_name}")
-
- try:
- if self.supervisor:
- logger.info(f"Requesting supervision approval for job: {job_name}")
-
- await job.run()
- logger.info(f"Job '{job_name}' completed successfully")
-
- if job.is_one_time():
- self.unregister_job(job_name)
- logger.info(f"One-time job '{job_name}' removed after execution")
-
- except Exception as e:
- logger.error(f"Job '{job_name}' failed with error: {e}")
-
- if job.is_one_time():
- self.unregister_job(job_name)
- logger.info(f"Failed one-time job '{job_name}' removed")
diff --git a/src/talos/core/main_agent.py b/src/talos/core/main_agent.py
deleted file mode 100644
index 8b354743..00000000
--- a/src/talos/core/main_agent.py
+++ /dev/null
@@ -1,409 +0,0 @@
-from __future__ import annotations
-
-import os
-from datetime import datetime
-from typing import Any, List, Optional
-
-from langchain_core.language_models import BaseChatModel
-from langchain_core.tools import BaseTool, tool
-
-from talos.core.agent import Agent
-from talos.core.job_scheduler import JobScheduler
-from talos.core.scheduled_job import ScheduledJob
-
-from talos.core.startup_task_manager import StartupTaskManager
-from talos.data.dataset_manager import DatasetManager
-from talos.hypervisor.hypervisor import Hypervisor
-from talos.models.services import Ticket
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.services.abstract.service import Service
-from talos.settings import GitHubSettings
-from talos.skills.base import Skill
-from talos.skills.codebase_evaluation import CodebaseEvaluationSkill
-from talos.skills.codebase_implementation import CodebaseImplementationSkill
-from talos.skills.cryptography import CryptographySkill
-from talos.skills.pr_review import PRReviewSkill
-from talos.skills.proposals import ProposalsSkill
-from talos.skills.twitter_influence import TwitterInfluenceSkill
-from talos.skills.twitter_sentiment import TwitterSentimentSkill
-from talos.tools.arbiscan import ArbiScanABITool, ArbiScanSourceCodeTool
-from talos.tools.document_loader import DatasetSearchTool, DocumentLoaderTool
-from talos.tools.github.tools import GithubTools
-from talos.tools.tool_manager import ToolManager
-
-
-class MainAgent(Agent):
- """
- A top-level agent that delegates to a conversational agent and a research agent.
- Also manages scheduled jobs for autonomous execution.
- """
-
- skills: list[Skill] = []
- services: list[Service] = []
- prompts_dir: str
- model: BaseChatModel
- is_main_agent: bool = True
- prompt_manager: Optional[PromptManager] = None
- dataset_manager: Optional[DatasetManager] = None
- job_scheduler: Optional[JobScheduler] = None
- scheduled_jobs: List[ScheduledJob] = []
- startup_task_manager: Optional[StartupTaskManager] = None
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- self._setup_prompt_manager()
- self._ensure_user_id()
- self._setup_memory()
- self._setup_skills_and_services()
- self._setup_hypervisor()
- self._setup_dataset_manager()
- self._setup_tool_manager()
- self._setup_job_scheduler()
- self._setup_startup_task_manager()
-
- def _get_verbose_level(self) -> int:
- """Convert verbose to integer level for backward compatibility."""
- if isinstance(self.verbose, bool):
- return 1 if self.verbose else 0
- return max(0, min(2, self.verbose))
-
- def _setup_prompt_manager(self) -> None:
- if not self.prompt_manager:
- self.prompt_manager = FilePromptManager(self.prompts_dir)
-
- use_voice_enhanced = os.getenv("TALOS_USE_VOICE_ENHANCED", "false").lower() == "true"
-
- if use_voice_enhanced:
- self._setup_voice_enhanced_prompt()
- else:
- self.set_prompt(["main_agent_prompt", "general_agent_prompt"])
-
- def _setup_voice_enhanced_prompt(self) -> None:
- """Setup voice-enhanced prompt by combining voice analysis with main prompt."""
- try:
- if not self.prompt_manager:
- raise ValueError("Prompt manager not initialized")
-
- from talos.skills.twitter_voice import TwitterVoiceSkill
-
- voice_skill = TwitterVoiceSkill()
- voice_result = voice_skill.run(username="talos_is")
-
- main_prompt = self.prompt_manager.get_prompt("main_agent_prompt")
- if not main_prompt:
- raise ValueError("Could not find main_agent_prompt")
-
- voice_enhanced_template = f"{voice_result['voice_prompt']}\n\n{main_prompt.template}"
-
- from talos.prompts.prompt import Prompt
-
- enhanced_prompt = Prompt(
- name="voice_enhanced_main_agent",
- template=voice_enhanced_template,
- input_variables=main_prompt.input_variables,
- )
-
- # Add the enhanced prompt to the manager if it's a FilePromptManager
- if hasattr(self.prompt_manager, "prompts"):
- self.prompt_manager.prompts["voice_enhanced_main_agent"] = enhanced_prompt
-
- self.set_prompt(["voice_enhanced_main_agent", "general_agent_prompt"])
-
- if self._get_verbose_level() >= 1:
- print(f"Voice integration enabled using {voice_result['voice_source']}")
-
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"Voice integration failed, falling back to default prompts: {e}")
- self.set_prompt(["main_agent_prompt", "general_agent_prompt"])
-
- def _ensure_user_id(self) -> None:
- """Ensure user_id is set, generate temporary one if needed."""
- if not self.user_id and self.use_database_memory:
- import uuid
-
- self.user_id = str(uuid.uuid4())
-
- def _setup_memory(self) -> None:
- """Initialize memory with database or file backend based on configuration."""
- if not self.memory:
- from langchain_openai import OpenAIEmbeddings
-
- from talos.core.memory import Memory
-
- embeddings_model = OpenAIEmbeddings()
-
- if self.use_database_memory:
- from talos.database.session import init_database
-
- init_database()
-
- session_id = self.session_id or "cli-session"
-
- self.memory = Memory(
- embeddings_model=embeddings_model,
- user_id=self.user_id,
- session_id=session_id,
- use_database=True,
- auto_save=True,
- verbose=self.verbose,
- )
- else:
- from pathlib import Path
-
- memory_dir = Path("memory")
- memory_dir.mkdir(exist_ok=True)
-
- self.memory = Memory(
- file_path=memory_dir / "memories.json",
- embeddings_model=embeddings_model,
- history_file_path=memory_dir / "history.json",
- use_database=False,
- auto_save=True,
- verbose=self.verbose,
- )
-
- def _setup_skills_and_services(self) -> None:
- if not self.prompt_manager:
- raise ValueError("Prompt manager not initialized.")
- services: list[Service] = []
- devin_service = None
- skills: list[Skill] = [
- ProposalsSkill(llm=self.model, prompt_manager=self.prompt_manager),
- CryptographySkill(),
- CodebaseEvaluationSkill(llm=self.model, prompt_manager=self.prompt_manager),
- ]
-
- try:
- import os
-
- from talos.services.implementations.devin import DevinService
-
- devin_api_key = os.getenv("DEVIN_API_KEY")
- if devin_api_key:
- devin_service = DevinService(api_key=devin_api_key)
- services.append(devin_service)
- except (ImportError, ValueError):
- pass # Devin API key not available, skip Devin service
-
- github_tools = None
- try:
- github_settings = GitHubSettings()
- github_token = github_settings.GITHUB_API_TOKEN
- if github_token:
- github_tools = GithubTools(token=github_token)
- skills.append(
- PRReviewSkill(llm=self.model, prompt_manager=self.prompt_manager, github_tools=github_tools)
- )
- except ValueError:
- pass # GitHub token not available, skip GitHub-dependent skills
-
- if devin_service:
- skills.append(
- CodebaseImplementationSkill(
- llm=self.model,
- prompt_manager=self.prompt_manager,
- devin_service=devin_service,
- github_tools=github_tools,
- )
- )
-
- try:
- from talos.tools.twitter_client import TwitterConfig
-
- TwitterConfig() # This will raise ValueError if TWITTER_BEARER_TOKEN is not set
- skills.extend(
- [
- TwitterSentimentSkill(prompt_manager=self.prompt_manager),
- TwitterInfluenceSkill(llm=self.model, prompt_manager=self.prompt_manager),
- ]
- )
- except ValueError:
- pass # Twitter token not available, skip Twitter-dependent skills
-
- self.skills = skills
- self.services = services
-
- def _setup_hypervisor(self) -> None:
- if not self.prompt_manager:
- raise ValueError("Prompt manager not initialized.")
- hypervisor = Hypervisor(
- model=self.model, prompts_dir=self.prompts_dir, prompt_manager=self.prompt_manager, schema=None
- )
- self.add_supervisor(hypervisor)
- hypervisor.register_agent(self)
-
- def _setup_dataset_manager(self) -> None:
- if not self.dataset_manager:
- if self.use_database_memory:
- from talos.database.session import init_database
-
- init_database()
-
- self.dataset_manager = DatasetManager(
- verbose=self.verbose,
- user_id=self.user_id,
- session_id=self.session_id or "cli-session",
- use_database=True,
- )
- else:
- self.dataset_manager = DatasetManager(verbose=self.verbose)
-
- def _setup_tool_manager(self) -> None:
- tool_manager = ToolManager()
- for skill in self.skills:
- tool_manager.register_tool(skill.create_ticket_tool())
- tool_manager.register_tool(self._get_ticket_status_tool())
- tool_manager.register_tool(self._add_memory_tool())
-
- if self.dataset_manager:
- tool_manager.register_tool(DocumentLoaderTool(self.dataset_manager))
- tool_manager.register_tool(DatasetSearchTool(self.dataset_manager))
- else:
- from langchain_openai import OpenAIEmbeddings
-
- basic_dataset_manager = DatasetManager(verbose=self.verbose, embeddings=OpenAIEmbeddings())
- tool_manager.register_tool(DocumentLoaderTool(basic_dataset_manager))
- tool_manager.register_tool(DatasetSearchTool(basic_dataset_manager))
-
- tool_manager.register_tool(ArbiScanSourceCodeTool())
- tool_manager.register_tool(ArbiScanABITool())
-
- self.tool_manager = tool_manager
-
- def _setup_job_scheduler(self) -> None:
- """Initialize the job scheduler and register any predefined scheduled jobs."""
- if not self.job_scheduler:
- self.job_scheduler = JobScheduler(supervisor=self.supervisor, timezone="UTC")
-
- for job in self.scheduled_jobs:
- self.job_scheduler.register_job(job)
-
- self.job_scheduler.start()
-
- def _setup_startup_task_manager(self) -> None:
- """Initialize the startup task manager and discover tasks from files."""
- if not self.startup_task_manager:
- self.startup_task_manager = StartupTaskManager(job_scheduler=self.job_scheduler)
-
- import logging
- logger = logging.getLogger(__name__)
- logger.info(f"Startup task manager initialized with {len(self.startup_task_manager.discovered_tasks)} discovered tasks")
-
- def add_scheduled_job(self, job: ScheduledJob) -> None:
- """
- Add a scheduled job to the agent.
-
- Args:
- job: The ScheduledJob instance to add
- """
- if not self.job_scheduler:
- raise ValueError("Job scheduler not initialized")
-
- self.scheduled_jobs.append(job)
- self.job_scheduler.register_job(job)
-
- def remove_scheduled_job(self, job_name: str) -> bool:
- """
- Remove a scheduled job from the agent.
-
- Args:
- job_name: Name of the job to remove
-
- Returns:
- True if job was found and removed, False otherwise
- """
- if not self.job_scheduler:
- return False
-
- success = self.job_scheduler.unregister_job(job_name)
-
- self.scheduled_jobs = [job for job in self.scheduled_jobs if job.name != job_name]
-
- return success
-
- def list_scheduled_jobs(self) -> List[ScheduledJob]:
- """Get all scheduled jobs."""
- return self.scheduled_jobs.copy()
-
- def get_scheduled_job(self, job_name: str) -> Optional[ScheduledJob]:
- """Get a scheduled job by name."""
- if not self.job_scheduler:
- return None
- return self.job_scheduler.get_job(job_name)
-
- def pause_scheduled_job(self, job_name: str) -> bool:
- """Pause a scheduled job."""
- if not self.job_scheduler:
- return False
- return self.job_scheduler.pause_job(job_name)
-
- def resume_scheduled_job(self, job_name: str) -> bool:
- """Resume a scheduled job."""
- if not self.job_scheduler:
- return False
- return self.job_scheduler.resume_job(job_name)
-
- def _add_memory_tool(self) -> BaseTool:
- @tool
- def add_memory(description: str, metadata: Optional[dict] = None) -> str:
- """
- Adds a memory to the agent's long-term memory.
-
- Args:
- description: A description of the memory to add.
- metadata: Optional metadata to associate with the memory.
-
- Returns:
- A confirmation message.
- """
- if self.memory:
- self.memory.add_memory(description, metadata)
- return f"Added to memory: {description}"
- return "Memory not configured for this agent."
-
- return add_memory
-
- def _get_ticket_status_tool(self) -> BaseTool:
- @tool
- def get_ticket_status(service_name: str, ticket_id: str) -> Ticket:
- """
- Get the status of a ticket.
-
- Args:
- service_name: The name of the service that the ticket was created for.
- ticket_id: The ID of the ticket.
-
- Returns:
- The ticket object.
- """
- skill = None
- for s in self.skills:
- if s.name == service_name:
- skill = s
- break
- if not skill:
- raise ValueError(f"Skill '{service_name}' not found.")
- ticket = skill.get_ticket_status(ticket_id)
- if not ticket:
- raise ValueError(f"Ticket '{ticket_id}' not found.")
- return ticket
-
- return get_ticket_status
-
- def _build_context(self, query: str, **kwargs) -> dict:
- base_context = super()._build_context(query, **kwargs)
-
- active_tickets = []
- for skill in self.skills:
- active_tickets.extend(skill.get_all_tickets())
- ticket_info = [f"- {ticket.ticket_id}: last updated at {ticket.updated_at}" for ticket in active_tickets]
-
- main_agent_context = {
- "time": datetime.now().isoformat(),
- "available_services": ", ".join([service.name for service in self.services]),
- "active_tickets": " ".join(ticket_info),
- }
-
- return {**base_context, **main_agent_context}
diff --git a/src/talos/core/memory.py b/src/talos/core/memory.py
deleted file mode 100644
index 427244e3..00000000
--- a/src/talos/core/memory.py
+++ /dev/null
@@ -1,400 +0,0 @@
-import json
-import time
-from dataclasses import dataclass, field
-from pathlib import Path
-from typing import List, Optional, TYPE_CHECKING, Any, Union
-
-from langchain_core.embeddings import Embeddings
-from langchain_core.messages import BaseMessage, messages_from_dict, messages_to_dict
-
-if TYPE_CHECKING:
- from langgraph.store.memory import InMemoryStore
- from langmem import create_memory_store_manager, create_memory_manager
-
-try:
- from langgraph.store.memory import InMemoryStore
- from langmem import create_memory_store_manager, create_memory_manager
- LANGMEM_AVAILABLE = True
-except ImportError:
- InMemoryStore = Any # type: ignore
- create_memory_store_manager = Any # type: ignore
- create_memory_manager = Any # type: ignore
- LANGMEM_AVAILABLE = False
-
-
-@dataclass
-class MemoryRecord:
- timestamp: float
- description: str
- metadata: dict = field(default_factory=dict)
- embedding: Optional[List[float]] = None
-
-
-class Memory:
- """
- A class to handle the saving and loading of an agent's memories using LangMem.
- Supports both SQLite (default) and file-based backends.
- """
-
- def __init__(
- self,
- file_path: Optional[Path] = None,
- embeddings_model: Optional[Embeddings] = None,
- history_file_path: Optional[Path] = None,
- batch_size: int = 10,
- auto_save: bool = True,
- user_id: Optional[str] = None,
- session_id: Optional[str] = None,
- use_database: bool = True,
- verbose: Union[bool, int] = False,
- similarity_threshold: float = 0.85,
- ):
- self.file_path = file_path
- self.history_file_path = history_file_path
- self.embeddings_model = embeddings_model
- self.batch_size = batch_size
- self.auto_save = auto_save
- self.user_id = user_id
- self.session_id = session_id or "default-session"
- self.use_database = use_database
- self.verbose = verbose
- self.similarity_threshold = similarity_threshold
- self.memories: List[MemoryRecord] = []
- self._unsaved_count = 0
- self._langmem_manager = None
- self._store = None
- self._db_backend = None
-
- if self.use_database and LANGMEM_AVAILABLE and self.embeddings_model:
- self._setup_langmem_sqlite()
- elif self.use_database and not LANGMEM_AVAILABLE and self.file_path:
- if self._get_verbose_level() >= 1:
- print("⚠ LangMem not available, falling back to file-based storage")
- self.use_database = False
- self._setup_langmem_file()
- elif self.use_database:
- raise ValueError("Database backend requested but LangMem is not available. Please install langmem or use file-based storage.")
- elif not self.use_database and self.file_path:
- self._setup_langmem_file()
- else:
- if self.file_path:
- self._setup_langmem_file()
-
- def _get_verbose_level(self) -> int:
- """Convert verbose to integer level for backward compatibility."""
- if isinstance(self.verbose, bool):
- return 1 if self.verbose else 0
- return max(0, min(2, self.verbose))
-
- def _setup_langmem_sqlite(self):
- """Setup LangMem with SQLite backend."""
- if not LANGMEM_AVAILABLE:
- if self._get_verbose_level() >= 1:
- print("⚠ LangMem not available, cannot use database backend")
- raise ValueError("LangMem is required for database backend but is not available")
- return
-
- try:
- self._store = InMemoryStore(
- index={
- "dims": 1536,
- "embed": "openai:text-embedding-3-small"
- }
- )
-
- self._langmem_manager = create_memory_store_manager(
- "gpt-5",
- namespace=("memories", self.user_id or "default"),
- store=self._store
- )
-
- if self._get_verbose_level() >= 1:
- print("✓ LangMem initialized with SQLite backend")
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"⚠ SQLite setup failed: {e}")
- raise ValueError(f"Failed to setup LangMem SQLite backend: {e}")
-
- def _setup_langmem_file(self):
- """Setup LangMem with file-based backend."""
- if not LANGMEM_AVAILABLE:
- if self._get_verbose_level() >= 1:
- print("⚠ LangMem not available, using file-only storage")
- if self.file_path:
- self.file_path.parent.mkdir(parents=True, exist_ok=True)
- if not self.file_path.exists():
- self.file_path.write_text("[]")
- self._load_file_memories()
- return
-
- try:
- self._langmem_manager = create_memory_manager("gpt-5")
- if self.file_path:
- self.file_path.parent.mkdir(parents=True, exist_ok=True)
- if not self.file_path.exists():
- self.file_path.write_text("[]")
- self._load_file_memories()
-
- if self._get_verbose_level() >= 1:
- print("✓ LangMem initialized with file backend")
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"✗ LangMem setup failed: {e}")
- if self.file_path:
- self.file_path.parent.mkdir(parents=True, exist_ok=True)
- if not self.file_path.exists():
- self.file_path.write_text("[]")
- self._load_file_memories()
-
- def _load_file_memories(self):
- """Load existing memories from file."""
- if self.file_path and self.file_path.exists():
- try:
- with open(self.file_path, "r") as f:
- data = json.load(f)
- self.memories = [MemoryRecord(**d) for d in data]
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"⚠ Failed to load memories: {e}")
-
- async def add_memory_async(self, description: str, metadata: Optional[dict] = None):
- """Add a memory using LangMem."""
- if self._langmem_manager and self._store:
- try:
- config = {"configurable": {"langgraph_user_id": self.user_id or "default"}}
- conversation = [{"role": "user", "content": description}]
- await self._langmem_manager.ainvoke({"messages": conversation}, config=config)
-
- if self._get_verbose_level() >= 1:
- print(f"✓ Memory saved: {description}")
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"✗ Failed to save memory: {e}")
- elif self._langmem_manager:
- try:
- conversation = [{"role": "user", "content": description}]
- await self._langmem_manager.ainvoke({"messages": conversation})
-
- memory = MemoryRecord(
- timestamp=time.time(),
- description=description,
- metadata=metadata or {},
- )
- self.memories.append(memory)
- self._unsaved_count += 1
-
- if self.auto_save and self._unsaved_count >= self.batch_size:
- self.flush()
-
- if self._get_verbose_level() >= 1:
- print(f"✓ Memory saved: {description}")
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"✗ Failed to save memory: {e}")
- else:
- memory = MemoryRecord(
- timestamp=time.time(),
- description=description,
- metadata=metadata or {},
- )
- self.memories.append(memory)
- self._unsaved_count += 1
-
- if self.auto_save and self._unsaved_count >= self.batch_size:
- self.flush()
-
- if self._get_verbose_level() >= 1:
- print(f"✓ Memory saved (fallback): {description}")
-
- def add_memory(self, description: str, metadata: Optional[dict] = None):
- """Add memory with backward compatibility."""
- if self._langmem_manager and self._store:
- try:
- config = {"configurable": {"langgraph_user_id": self.user_id or "default"}}
- conversation = [{"role": "user", "content": description}]
- self._langmem_manager.invoke({"messages": conversation}, config=config)
-
- if self._get_verbose_level() >= 1:
- print(f"✓ Memory saved to LangMem store: {description}")
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"✗ LangMem store failed, using fallback: {e}")
- memory = MemoryRecord(
- timestamp=time.time(),
- description=description,
- metadata=metadata or {},
- )
- self.memories.append(memory)
- self._unsaved_count += 1
-
- if self.auto_save and self._unsaved_count >= self.batch_size:
- self.flush()
- elif self._langmem_manager:
- try:
- conversation = [{"role": "user", "content": description}]
- self._langmem_manager.invoke({"messages": conversation})
-
- memory = MemoryRecord(
- timestamp=time.time(),
- description=description,
- metadata=metadata or {},
- )
- self.memories.append(memory)
- self._unsaved_count += 1
-
- if self.auto_save and self._unsaved_count >= self.batch_size:
- self.flush()
-
- if self._get_verbose_level() >= 1:
- print(f"✓ Memory saved to LangMem: {description}")
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"✗ LangMem failed, using fallback: {e}")
- memory = MemoryRecord(
- timestamp=time.time(),
- description=description,
- metadata=metadata or {},
- )
- self.memories.append(memory)
- self._unsaved_count += 1
-
- if self.auto_save and self._unsaved_count >= self.batch_size:
- self.flush()
- else:
- memory = MemoryRecord(
- timestamp=time.time(),
- description=description,
- metadata=metadata or {},
- )
- self.memories.append(memory)
- self._unsaved_count += 1
-
- if self.auto_save and self._unsaved_count >= self.batch_size:
- self.flush()
-
- if self._get_verbose_level() >= 1:
- print(f"✓ Memory saved (fallback): {description}")
-
- async def search_async(self, query: str, k: int = 5) -> List[MemoryRecord]:
- """Search memories using LangMem."""
- if self._langmem_manager and self._store:
- try:
- config = {"configurable": {"langgraph_user_id": self.user_id or "default"}}
- results = await self._langmem_manager.asearch(query=query, config=config)
-
- memory_records = []
- for result in results[:k]:
- memory_records.append(MemoryRecord(
- timestamp=time.time(),
- description=str(result),
- metadata={},
- ))
-
- verbose_level = self._get_verbose_level()
- if verbose_level >= 1 and memory_records:
- print(f"🔍 Memory search: found {len(memory_records)} relevant memories")
- if verbose_level >= 2:
- for i, memory in enumerate(memory_records, 1):
- print(f" {i}. {memory.description}")
- if memory.metadata:
- print(f" Metadata: {memory.metadata}")
-
- return memory_records
- except Exception as e:
- verbose_level = self._get_verbose_level()
- if verbose_level >= 1:
- print(f"✗ Search failed: {e}")
- if verbose_level >= 2:
- print(f" Error details: {str(e)}")
- print(f" Query: {query}")
- return []
- else:
- if not self.memories:
- return []
-
- results = []
- query_lower = query.lower()
- for memory in self.memories:
- if query_lower in memory.description.lower():
- results.append(memory)
-
- return results[:k]
-
- def search(self, query: str, k: int = 5) -> List[MemoryRecord]:
- """Search with backward compatibility."""
- import asyncio
- try:
- return asyncio.run(self.search_async(query, k))
- except Exception:
- return []
-
- def list_all(self, filter_user_id: Optional[str] = None) -> List[MemoryRecord]:
- """List all memories."""
- if self._store:
- if self._get_verbose_level() >= 1:
- print("📋 Listed memories from SQLite store")
- return []
- else:
- results = self.memories.copy()
- results.sort(key=lambda x: x.timestamp, reverse=True)
- verbose_level = self._get_verbose_level()
- if verbose_level >= 1 and results:
- print(f"📋 Listed {len(results)} memories")
- if verbose_level >= 2:
- for i, memory in enumerate(results[:5], 1):
- print(f" {i}. {memory.description}")
- if memory.metadata:
- print(f" Metadata: {memory.metadata}")
- if len(results) > 5:
- print(f" ... and {len(results) - 5} more memories")
- return results
-
- def load_history(self) -> List[BaseMessage]:
- """Load conversation history."""
- if not self.history_file_path or not self.history_file_path.exists():
- return []
- try:
- with open(self.history_file_path, "r") as f:
- dicts = json.load(f)
- return messages_from_dict(dicts)
- except Exception:
- return []
-
- def save_history(self, messages: List[BaseMessage]):
- """Save conversation history."""
- if not self.history_file_path:
- return
- try:
- if not self.history_file_path.exists():
- self.history_file_path.parent.mkdir(parents=True, exist_ok=True)
- self.history_file_path.touch()
- dicts = messages_to_dict(messages)
- with open(self.history_file_path, "w") as f:
- json.dump(dicts, f, indent=4)
- except Exception as e:
- verbose_level = self._get_verbose_level()
- if verbose_level >= 1:
- print(f"⚠ Failed to save history: {e}")
- if verbose_level >= 2:
- print(f" Error details: {str(e)}")
- print(f" History file: {self.history_file_path}")
-
- def flush(self):
- """Manually save all unsaved memories to disk."""
- if self._unsaved_count > 0 and self.file_path:
- try:
- with open(self.file_path, "w") as f:
- json.dump([m.__dict__ for m in self.memories], f, indent=4)
- self._unsaved_count = 0
- except Exception as e:
- verbose_level = self._get_verbose_level()
- if verbose_level >= 1:
- print(f"⚠ Failed to flush memories: {e}")
- if verbose_level >= 2:
- print(f" Error details: {str(e)}")
- print(f" Memory file: {self.file_path}")
-
- def __del__(self):
- """Ensure data is saved when object is destroyed."""
- if hasattr(self, "_unsaved_count") and self._unsaved_count > 0:
- self.flush()
diff --git a/src/talos/core/scheduled_job.py b/src/talos/core/scheduled_job.py
deleted file mode 100644
index 0c57d96e..00000000
--- a/src/talos/core/scheduled_job.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from __future__ import annotations
-
-import logging
-from abc import ABC, abstractmethod
-from datetime import datetime
-from typing import Any, Optional
-
-from pydantic import BaseModel, ConfigDict, Field
-
-logger = logging.getLogger(__name__)
-
-
-class ScheduledJob(BaseModel, ABC):
- """
- Abstract base class for scheduled jobs that can be executed by the MainAgent.
-
- Jobs can be scheduled using either:
- - A cron expression for recurring execution
- - A specific datetime for one-time execution
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- name: str = Field(..., description="Unique name for this scheduled job")
- description: str = Field(..., description="Human-readable description of what this job does")
- cron_expression: Optional[str] = Field(
- None, description="Cron expression for recurring jobs (e.g., '0 9 * * *' for daily at 9 AM)"
- )
- execute_at: Optional[datetime] = Field(default=None, description="Specific datetime for one-time execution")
- enabled: bool = Field(default=True, description="Whether this job is enabled for execution")
- max_instances: int = Field(default=1, description="Maximum number of concurrent instances of this job")
-
- def model_post_init(self, __context: Any) -> None:
- if not self.cron_expression and not self.execute_at:
- raise ValueError("Either cron_expression or execute_at must be provided")
- if self.cron_expression and self.execute_at:
- raise ValueError("Only one of cron_expression or execute_at should be provided")
-
- @abstractmethod
- async def run(self, **kwargs: Any) -> Any:
- """
- Execute the scheduled job.
-
- This method should contain the actual logic for the job.
- It will be called by the scheduler when the job is triggered.
-
- Args:
- **kwargs: Additional arguments that may be passed to the job
-
- Returns:
- Any result from the job execution
- """
- pass
-
- def is_recurring(self) -> bool:
- """Check if this is a recurring job (has cron expression)."""
- return self.cron_expression is not None
-
- def is_one_time(self) -> bool:
- """Check if this is a one-time job (has execute_at datetime)."""
- return self.execute_at is not None
-
- def should_execute_now(self) -> bool:
- """
- Check if this job should execute now (for one-time jobs).
- Only relevant for one-time jobs.
- """
- if not self.is_one_time() or not self.execute_at:
- return False
- return datetime.now() >= self.execute_at
-
- def __str__(self) -> str:
- schedule_info = self.cron_expression if self.cron_expression else f"at {self.execute_at}"
- return f"ScheduledJob(name='{self.name}', schedule='{schedule_info}', enabled={self.enabled})"
diff --git a/src/talos/core/startup_task.py b/src/talos/core/startup_task.py
deleted file mode 100644
index 5532ba34..00000000
--- a/src/talos/core/startup_task.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-import hashlib
-import json
-import logging
-from abc import ABC, abstractmethod
-from datetime import datetime
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field, ConfigDict
-
-logger = logging.getLogger(__name__)
-
-
-class StartupTask(BaseModel, ABC):
- """
- Abstract base class for startup tasks that can be executed by the daemon.
-
- Tasks are identified by content-based hashes and tracked for completion.
- They can be one-time or recurring, similar to database migrations.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- name: str = Field(..., description="Unique name for this startup task")
- description: str = Field(..., description="Human-readable description of what this task does")
- task_hash: Optional[str] = Field(None, description="Content-based hash for task identification")
- created_at: datetime = Field(default_factory=datetime.now, description="When this task was created")
- execute_at: Optional[datetime] = Field(None, description="Specific datetime for one-time execution")
- cron_expression: Optional[str] = Field(None, description="Cron expression for recurring tasks")
- enabled: bool = Field(True, description="Whether this task is enabled for execution")
-
- def model_post_init(self, __context: Any) -> None:
- if not self.task_hash:
- self.task_hash = self.generate_hash()
-
- if not self.cron_expression and not self.execute_at:
- self.execute_at = datetime.now()
-
- @abstractmethod
- async def run(self, **kwargs: Any) -> Any:
- """
- Execute the startup task.
-
- This method should contain the actual logic for the task.
- It should be idempotent - safe to run multiple times.
-
- Args:
- **kwargs: Additional arguments that may be passed to the task
-
- Returns:
- Any result from the task execution
- """
- pass
-
- def generate_hash(self) -> str:
- """
- Generate a content-based hash for this task.
-
- Returns:
- Hexadecimal SHA-256 hash of task content
- """
- task_data = {
- "name": self.name,
- "description": self.description,
- "execute_at": self.execute_at.isoformat() if self.execute_at else None,
- "cron_expression": self.cron_expression,
- }
- task_json = json.dumps(task_data, sort_keys=True)
- return hashlib.sha256(task_json.encode()).hexdigest()[:16]
-
- def is_recurring(self) -> bool:
- """Check if this is a recurring task (has cron expression)."""
- return self.cron_expression is not None
-
- def is_one_time(self) -> bool:
- """Check if this is a one-time task (has execute_at datetime)."""
- return self.execute_at is not None
-
- def should_execute_now(self) -> bool:
- """Check if this task should execute now (for one-time tasks)."""
- if not self.is_one_time() or not self.execute_at:
- return False
- return datetime.now() >= self.execute_at
-
- def __str__(self) -> str:
- schedule_info = self.cron_expression if self.cron_expression else f"at {self.execute_at}"
- return f"StartupTask(name='{self.name}', hash='{self.task_hash}', schedule='{schedule_info}', enabled={self.enabled})"
diff --git a/src/talos/core/startup_task_manager.py b/src/talos/core/startup_task_manager.py
deleted file mode 100644
index 42d3e41a..00000000
--- a/src/talos/core/startup_task_manager.py
+++ /dev/null
@@ -1,305 +0,0 @@
-from __future__ import annotations
-
-import importlib.util
-import json
-import logging
-from datetime import datetime
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-
-from talos.core.startup_task import StartupTask
-from talos.core.job_scheduler import JobScheduler
-from talos.core.scheduled_job import ScheduledJob
-
-logger = logging.getLogger(__name__)
-
-
-class StartupTaskRecord:
- """Record of a startup task execution."""
-
- def __init__(
- self,
- task_hash: str,
- name: str,
- executed_at: datetime,
- status: str = "completed",
- error: Optional[str] = None
- ):
- self.task_hash = task_hash
- self.name = name
- self.executed_at = executed_at
- self.status = status
- self.error = error
-
- def to_dict(self) -> Dict[str, Any]:
- return {
- "task_hash": self.task_hash,
- "name": self.name,
- "executed_at": self.executed_at.isoformat(),
- "status": self.status,
- "error": self.error
- }
-
- @classmethod
- def from_dict(cls, data: Dict[str, Any]) -> "StartupTaskRecord":
- return cls(
- task_hash=data["task_hash"],
- name=data["name"],
- executed_at=datetime.fromisoformat(data["executed_at"]),
- status=data.get("status", "completed"),
- error=data.get("error")
- )
-
-
-class StartupTaskManager:
- """
- Manages startup tasks for the Talos daemon.
-
- Provides functionality to:
- - Discover and load tasks from individual hash-named files
- - Execute pending tasks on daemon startup
- - Persist execution records to prevent re-execution
- - Schedule recurring tasks with JobScheduler
- """
-
- def __init__(
- self,
- tasks_dir: Optional[Path] = None,
- completed_tasks_file: Optional[Path] = None,
- job_scheduler: Optional[JobScheduler] = None
- ):
- self.tasks_dir = tasks_dir or Path("startup_tasks")
- self.completed_tasks_file = completed_tasks_file or self.tasks_dir / "completed_tasks.json"
- self.job_scheduler = job_scheduler
- self.discovered_tasks: List[StartupTask] = []
- self.completed_records: List[StartupTaskRecord] = []
-
- self.tasks_dir.mkdir(parents=True, exist_ok=True)
- self.completed_tasks_file.parent.mkdir(parents=True, exist_ok=True)
-
- self._load_completed_records()
- self._discover_tasks()
-
- def _load_completed_records(self) -> None:
- """Load completed task records from file."""
- if not self.completed_tasks_file.exists():
- self.completed_tasks_file.write_text("[]")
- return
-
- try:
- with open(self.completed_tasks_file, "r") as f:
- data = json.load(f)
- self.completed_records = [StartupTaskRecord.from_dict(record) for record in data]
- except Exception as e:
- logger.error(f"Failed to load completed task records: {e}")
- self.completed_records = []
-
- def _save_completed_records(self) -> None:
- """Save completed task records to file."""
- try:
- with open(self.completed_tasks_file, "w") as f:
- data = [record.to_dict() for record in self.completed_records]
- json.dump(data, f, indent=4)
- except Exception as e:
- logger.error(f"Failed to save completed task records: {e}")
-
- def _discover_tasks(self) -> None:
- """Discover and load startup tasks from individual files."""
- self.discovered_tasks = []
-
- if not self.tasks_dir.exists():
- logger.info("No startup tasks directory found")
- return
-
- task_files = []
- for file_path in self.tasks_dir.glob("*.py"):
- if file_path.name != "__init__.py" and len(file_path.stem) >= 8:
- task_files.append(file_path)
-
- task_files.sort(key=lambda f: f.name)
-
- for task_file in task_files:
- try:
- task = self._load_task_from_file(task_file)
- if task:
- self.discovered_tasks.append(task)
- logger.info(f"Discovered startup task: {task.name} from {task_file.name}")
- except Exception as e:
- logger.error(f"Failed to load task from {task_file}: {e}")
-
- logger.info(f"Discovered {len(self.discovered_tasks)} startup tasks")
-
- def _load_task_from_file(self, task_file: Path) -> Optional[StartupTask]:
- """Load a startup task from a Python file."""
- try:
- spec = importlib.util.spec_from_file_location(task_file.stem, task_file)
- if not spec or not spec.loader:
- logger.error(f"Could not load spec for {task_file}")
- return None
-
- module = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(module)
-
- # Look for a function called 'create_task' that returns a StartupTask
- if hasattr(module, 'create_task'):
- task = module.create_task()
- if isinstance(task, StartupTask):
- if not task.task_hash:
- task.task_hash = task_file.stem
- return task
- else:
- logger.error(f"create_task() in {task_file} did not return a StartupTask instance")
- else:
- logger.error(f"No create_task() function found in {task_file}")
-
- except Exception as e:
- logger.error(f"Error loading task from {task_file}: {e}")
-
- return None
-
- def is_task_completed(self, task_hash: str) -> bool:
- """Check if a task has been completed."""
- return any(record.task_hash == task_hash for record in self.completed_records)
-
- def get_pending_tasks(self) -> List[StartupTask]:
- """Get all pending (not yet completed) tasks."""
- pending = []
- for task in self.discovered_tasks:
- if not task.enabled:
- continue
-
- if task.is_recurring():
- pending.append(task)
- elif task.task_hash and not self.is_task_completed(task.task_hash):
- if task.should_execute_now():
- pending.append(task)
-
- return pending
-
- def create_task_file(self, task: StartupTask, custom_hash: Optional[str] = None) -> Path:
- """
- Create a new task file with hash-based filename.
-
- Args:
- task: The StartupTask instance to save
- custom_hash: Optional custom hash to use as filename (defaults to task.generate_hash())
-
- Returns:
- Path to the created task file
- """
- if not task.task_hash:
- task.task_hash = task.generate_hash()
-
- filename = custom_hash or task.task_hash
- task_file = self.tasks_dir / f"{filename}.py"
-
- if task_file.exists():
- raise FileExistsError(f"Task file {task_file} already exists")
-
- task_content = self._generate_task_file_content(task)
-
- task_file.write_text(task_content)
- logger.info(f"Created task file: {task_file}")
-
- return task_file
-
- def _generate_task_file_content(self, task: StartupTask) -> str:
- """Generate the content for a task file."""
- task_class_name = task.__class__.__name__
- task_module = task.__class__.__module__
-
- content = f'''"""
-Startup task: {task.name}
-Generated on: {datetime.now().isoformat()}
-Hash: {task.task_hash}
-"""
-
-from {task_module} import {task_class_name}
-
-
-def create_task() -> {task_class_name}:
- """Create and return the startup task instance."""
- return {task_class_name}(
- name="{task.name}",
- description="{task.description}",
- task_hash="{task.task_hash}",
- enabled={task.enabled},
- execute_at={repr(task.execute_at)},
- cron_expression={repr(task.cron_expression)}
- )
-'''
-
- return content
-
- async def execute_pending_tasks(self) -> None:
- """Execute all pending startup tasks."""
- pending_tasks = self.get_pending_tasks()
-
- if not pending_tasks:
- logger.info("No pending startup tasks to execute")
- return
-
- logger.info(f"Executing {len(pending_tasks)} pending startup tasks")
-
- for task in pending_tasks:
- await self._execute_task(task)
-
- async def _execute_task(self, task: StartupTask) -> None:
- """Execute a single startup task."""
- logger.info(f"Executing startup task: {task.name}")
-
- try:
- await task.run()
-
- if task.is_one_time() and task.task_hash:
- record = StartupTaskRecord(
- task_hash=task.task_hash,
- name=task.name,
- executed_at=datetime.now(),
- status="completed"
- )
- self.completed_records.append(record)
- self._save_completed_records()
- logger.info(f"Startup task '{task.name}' completed successfully")
-
- if task.is_recurring() and self.job_scheduler:
- recurring_job = StartupTaskJob(task)
- self.job_scheduler.register_job(recurring_job)
- logger.info(f"Scheduled recurring startup task: {task.name}")
-
- except Exception as e:
- logger.error(f"Startup task '{task.name}' failed: {e}")
-
- if task.task_hash:
- record = StartupTaskRecord(
- task_hash=task.task_hash,
- name=task.name,
- executed_at=datetime.now(),
- status="failed",
- error=str(e)
- )
- self.completed_records.append(record)
- self._save_completed_records()
-
- def list_completed_tasks(self) -> List[StartupTaskRecord]:
- """Get all completed task records."""
- return self.completed_records.copy()
-
-
-class StartupTaskJob(ScheduledJob):
- """Wrapper to convert StartupTask to ScheduledJob for recurring execution."""
-
- def __init__(self, startup_task: StartupTask):
- self.startup_task = startup_task
- super().__init__(
- name=f"startup_task_{startup_task.name}",
- description=f"Recurring execution of startup task: {startup_task.description}",
- cron_expression=startup_task.cron_expression,
- execute_at=None,
- enabled=startup_task.enabled,
- max_instances=1
- )
-
- async def run(self, **kwargs: Any) -> Any:
- """Execute the wrapped startup task."""
- return await self.startup_task.run(**kwargs)
diff --git a/src/talos/dag/__init__.py b/src/talos/dag/__init__.py
deleted file mode 100644
index c26153dd..00000000
--- a/src/talos/dag/__init__.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-Talos DAG Module
-
-This module implements a DAG (Directed Acyclic Graph) architecture for the Talos AI agent,
-using LangGraph's memory and tools patterns for centralized state management and tool execution.
-
-The DAG architecture supports various node types:
-- Data sources (databases, APIs, files)
-- Agents (specialized AI agents for specific tasks)
-- Prompts (dynamic prompt generation and management)
-- Tools (LangGraph-integrated tools and APIs)
-- Services (backend services and integrations)
-- Skills (specialized capabilities and workflows)
-
-Key features:
-- LangGraph-centric architecture with checkpointer memory
-- Thread-based conversation tracking
-- Integrated tool execution with LangGraph ToolNode
-- Modular design with pluggable node types
-- On-chain representation and storage
-- State management and context passing between nodes
-"""
-
-from talos.dag.dag_agent import DAGAgent
-from talos.dag.graph import TalosDAG
-from talos.dag.manager import DAGManager
-from talos.dag.nodes import (
- DAGNode,
- AgentNode,
- SkillNode,
- ServiceNode,
- ToolNode,
- DataSourceNode,
- PromptNode,
- RouterNode,
- GraphState,
-)
-from talos.dag.extensible_nodes import ExtensibleSkillNode, ConfigurableAgentNode
-from talos.dag.extensible_manager import ExtensibleDAGManager
-from talos.dag.structured_nodes import StructuredSupportAgentNode, StructuredRouterNode, NodeVersion
-from talos.dag.structured_manager import StructuredDAGManager
-
-__all__ = [
- "DAGAgent",
- "TalosDAG",
- "DAGManager",
- "DAGNode",
- "AgentNode",
- "SkillNode",
- "ServiceNode",
- "ToolNode",
- "DataSourceNode",
- "PromptNode",
- "RouterNode",
- "GraphState",
- "ExtensibleSkillNode",
- "ConfigurableAgentNode",
- "ExtensibleDAGManager",
- "StructuredSupportAgentNode",
- "StructuredRouterNode",
- "NodeVersion",
- "StructuredDAGManager",
-]
diff --git a/src/talos/dag/dag_agent.py b/src/talos/dag/dag_agent.py
deleted file mode 100644
index 6bda7159..00000000
--- a/src/talos/dag/dag_agent.py
+++ /dev/null
@@ -1,135 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, List, Optional, Union
-
-from langchain_core.messages import BaseMessage
-from pydantic import BaseModel, ConfigDict
-
-from talos.core.agent import Agent
-from talos.dag.manager import DAGManager
-from talos.dag.nodes import GraphState
-from talos.data.dataset_manager import DatasetManager
-from talos.services.abstract.service import Service
-from talos.skills.base import Skill
-from talos.tools.tool_manager import ToolManager
-
-
-class DAGAgent(Agent):
- """Agent that uses DAG-based execution instead of traditional linear flow."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- dag_manager: Optional[DAGManager] = None
- verbose: Union[bool, int] = False
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- if self.dag_manager is None:
- self.dag_manager = DAGManager()
-
- def _get_verbose_level(self) -> int:
- """Convert verbose to integer level for backward compatibility."""
- if isinstance(self.verbose, bool):
- return 1 if self.verbose else 0
- return max(0, min(2, self.verbose))
-
- def setup_dag(
- self,
- skills: List[Skill],
- services: List[Service],
- tool_manager: ToolManager,
- dataset_manager: Optional[DatasetManager] = None
- ) -> None:
- """Set up the DAG with the provided components."""
- if not self.prompt_manager:
- raise ValueError("Prompt manager must be initialized before setting up DAG")
-
- if self.dag_manager is None:
- self.dag_manager = DAGManager()
-
- from langchain_core.language_models import BaseChatModel
- if not isinstance(self.model, BaseChatModel):
- raise ValueError("DAG requires a BaseChatModel, got: " + str(type(self.model)))
-
- if dataset_manager and self.user_id and not dataset_manager.use_database:
- if self.verbose:
- print("🔄 Upgrading DatasetManager to use database persistence")
-
- from talos.database.session import init_database
- from langchain_openai import OpenAIEmbeddings
-
- init_database()
-
- dataset_manager = DatasetManager(
- verbose=dataset_manager.verbose,
- user_id=self.user_id,
- session_id=self.session_id or "dag-session",
- use_database=True,
- embeddings=OpenAIEmbeddings()
- )
-
- self.dag_manager.create_default_dag(
- model=self.model,
- prompt_manager=self.prompt_manager,
- skills=skills,
- services=services,
- tool_manager=tool_manager,
- dataset_manager=dataset_manager
- )
-
- def run(self, message: str, history: list[BaseMessage] | None = None, **kwargs) -> BaseModel:
- """Execute the query using the DAG with LangGraph memory patterns."""
- thread_id = kwargs.get("thread_id", "default_conversation")
-
- context = self._build_context(message, **kwargs)
- context.update(kwargs)
-
- try:
- if self.dag_manager is None:
- raise ValueError("DAG manager not initialized")
- result_state = self.dag_manager.execute_dag(message, context, thread_id=thread_id)
-
- processed_result = self._process_dag_result(result_state, message)
- return processed_result
-
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"DAG execution failed, falling back to traditional agent: {e}")
- return super().run(message, history, **kwargs)
-
- def _process_dag_result(self, result_state: GraphState, original_query: str) -> BaseModel:
- """Process the DAG execution result into a standard agent response."""
- from langchain_core.messages import AIMessage
-
- results = result_state.get("results", {})
- messages = result_state.get("messages", [])
-
- response_parts = []
-
- if results:
- response_parts.append("DAG Execution Results:")
- for node_id, result in results.items():
- response_parts.append(f"- {node_id}: {str(result)[:200]}...")
-
- if messages:
- response_parts.append("\nExecution Flow:")
- response_parts.extend(f"- {msg}" for msg in messages[-5:]) # Last 5 messages
-
- response_content = "\n".join(response_parts) if response_parts else "DAG execution completed"
-
- ai_message = AIMessage(content=response_content)
- self.history.append(ai_message)
-
- return ai_message
-
- def get_dag_visualization(self) -> str:
- """Get a visualization of the current DAG structure."""
- if self.dag_manager is None:
- return "DAG manager not initialized"
- return self.dag_manager.get_dag_visualization()
-
- def get_dag_config_for_chain(self) -> str:
- """Get the DAG configuration serialized for on-chain storage."""
- if self.dag_manager is None:
- return "{}"
- return self.dag_manager.serialize_dag_for_chain()
diff --git a/src/talos/dag/extensible_manager.py b/src/talos/dag/extensible_manager.py
deleted file mode 100644
index d25e8d62..00000000
--- a/src/talos/dag/extensible_manager.py
+++ /dev/null
@@ -1,241 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
-
-from langchain_core.language_models import BaseChatModel
-from pydantic import ConfigDict
-
-from talos.dag.extensible_nodes import ExtensibleSkillNode, ConfigurableAgentNode
-
-if TYPE_CHECKING:
- from talos.core.extensible_agent import SupportAgent, SupportAgentRegistry
-from talos.dag.graph import TalosDAG
-from talos.dag.manager import DAGManager
-from talos.dag.nodes import (
- DataSourceNode, PromptNode, RouterNode, ToolNode
-)
-from talos.data.dataset_manager import DatasetManager
-from talos.prompts.prompt_manager import PromptManager
-from talos.services.abstract.service import Service
-from talos.tools.tool_manager import ToolManager
-
-
-class ExtensibleDAGManager(DAGManager):
- """
- Enhanced DAG manager that supports extensible skill agents and dynamic reconfiguration.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- skill_registry: Optional["SupportAgentRegistry"] = None
-
- def create_extensible_dag(
- self,
- model: BaseChatModel,
- prompt_manager: PromptManager,
- skill_registry: "SupportAgentRegistry",
- services: List[Service],
- tool_manager: ToolManager,
- dataset_manager: Optional[DatasetManager] = None,
- dag_name: str = "extensible_talos_dag"
- ) -> TalosDAG:
- """Create a DAG with extensible skill agents."""
- self.skill_registry = skill_registry
-
- dag = TalosDAG(
- name=dag_name,
- description="Extensible Talos agent DAG with configurable skill agents"
- )
-
- from talos.prompts.prompt_config import PromptConfig, StaticPromptSelector
-
- legacy_config = PromptConfig(
- selector=StaticPromptSelector(
- prompt_names=["main_agent_prompt", "general_agent_prompt"]
- )
- )
-
- prompt_node = PromptNode(
- node_id="main_prompt",
- name="Main Agent Prompt",
- description="Primary prompt for the extensible Talos agent",
- prompt_manager=prompt_manager,
- prompt_config=legacy_config
- )
- dag.add_node(prompt_node)
-
- routing_logic = {}
- skill_agents = skill_registry.get_all_agents()
-
- for skill_name, skill_agent in skill_agents.items():
- routing_logic[skill_name.lower()] = f"{skill_name}_skill"
-
- if "proposal" in skill_name.lower():
- routing_logic["proposal"] = f"{skill_name}_skill"
- routing_logic["governance"] = f"{skill_name}_skill"
- elif "twitter" in skill_name.lower():
- routing_logic["twitter"] = f"{skill_name}_skill"
- routing_logic["sentiment"] = f"{skill_name}_skill"
- elif "github" in skill_name.lower() or "pr" in skill_name.lower():
- routing_logic["github"] = f"{skill_name}_skill"
- routing_logic["review"] = f"{skill_name}_skill"
- elif "crypto" in skill_name.lower():
- routing_logic["crypto"] = f"{skill_name}_skill"
- routing_logic["encrypt"] = f"{skill_name}_skill"
-
- router_node = RouterNode(
- node_id="extensible_router",
- name="Extensible Router",
- description="Routes queries to appropriate extensible skill agents",
- routing_logic=routing_logic
- )
- dag.add_node(router_node)
-
- if dataset_manager:
- data_node = DataSourceNode(
- node_id="dataset_source",
- name="Dataset Manager",
- description="Provides relevant documents and context",
- data_source=dataset_manager
- )
- dag.add_node(data_node)
- dag.add_edge("main_prompt", "dataset_source")
- dag.add_edge("dataset_source", "extensible_router")
- else:
- dag.add_edge("main_prompt", "extensible_router")
-
- for skill_name, skill_agent in skill_agents.items():
- skill_node = ExtensibleSkillNode(
- node_id=f"{skill_name}_skill",
- name=f"{skill_name.title()} Skill",
- description=skill_agent.description or f"Extensible skill for {skill_name} operations",
- skill_agent=skill_agent
- )
- dag.add_node(skill_node)
-
- for service in services:
- service_node = ConfigurableAgentNode(
- node_id=f"{service.name}_service",
- name=f"{service.name.title()} Service",
- description=f"Configurable service for {service.name} operations",
- agent_config={"service_type": type(service).__name__},
- model=model
- )
- dag.add_node(service_node)
-
- if tool_manager.tools:
- tools_list = list(tool_manager.tools.values())
- tool_node = ToolNode(
- node_id="extensible_tools",
- name="Extensible Tools",
- description="LangGraph tools for various operations",
- tools=tools_list
- )
- dag.add_node(tool_node)
-
- conditional_targets = {}
- for keyword, target in routing_logic.items():
- if target in [node.node_id for node in dag.nodes.values()]:
- conditional_targets[target] = target
-
- if conditional_targets:
- dag.add_conditional_edge("extensible_router", conditional_targets)
-
- self.current_dag = dag
- return dag
-
- def add_skill_to_dag(self, skill_agent: "SupportAgent") -> bool:
- """Add a new skill agent to the current DAG."""
- if not self.current_dag or not self.skill_registry:
- return False
-
- self.skill_registry.register_agent(skill_agent)
-
- skill_node = ExtensibleSkillNode(
- node_id=f"{skill_agent.name}_skill",
- name=f"{skill_agent.name.title()} Skill",
- description=skill_agent.description or f"Extensible skill for {skill_agent.name} operations",
- skill_agent=skill_agent
- )
- self.current_dag.add_node(skill_node)
-
- router_node = self.current_dag.nodes.get("extensible_router")
- if router_node and hasattr(router_node, 'routing_logic'):
- router_node.routing_logic[skill_agent.name.lower()] = f"{skill_agent.name}_skill"
-
- conditional_targets = {}
- for keyword, target in router_node.routing_logic.items():
- if target in [node.node_id for node in self.current_dag.nodes.values()]:
- conditional_targets[target] = target
-
- if conditional_targets:
- self.current_dag.conditional_edges["extensible_router"] = conditional_targets
- self.current_dag._rebuild_graph()
-
- return True
-
- def remove_skill_from_dag(self, skill_name: str) -> bool:
- """Remove a skill agent from the current DAG."""
- if not self.current_dag or not self.skill_registry:
- return False
-
- success = self.skill_registry.unregister_agent(skill_name)
- if not success:
- return False
-
- node_id = f"{skill_name}_skill"
- success = self.current_dag.remove_node(node_id)
-
- router_node = self.current_dag.nodes.get("extensible_router")
- if router_node and hasattr(router_node, 'routing_logic'):
- keys_to_remove = [k for k, v in router_node.routing_logic.items() if v == node_id]
- for key in keys_to_remove:
- del router_node.routing_logic[key]
-
- conditional_targets = {}
- for keyword, target in router_node.routing_logic.items():
- if target in [node.node_id for node in self.current_dag.nodes.values()]:
- conditional_targets[target] = target
-
- if conditional_targets:
- self.current_dag.conditional_edges["extensible_router"] = conditional_targets
- else:
- if "extensible_router" in self.current_dag.conditional_edges:
- del self.current_dag.conditional_edges["extensible_router"]
-
- self.current_dag._rebuild_graph()
-
- return success
-
- def get_extensible_dag_status(self) -> Dict[str, Any]:
- """Get status of the extensible DAG."""
- if not self.current_dag:
- return {"status": "No DAG available"}
-
- skill_nodes = {}
- configurable_nodes = {}
-
- for node_id, node in self.current_dag.nodes.items():
- if isinstance(node, ExtensibleSkillNode):
- skill_nodes[node_id] = {
- "name": node.name,
- "skill_agent": node.skill_agent.name,
- "domain": node.skill_agent.domain,
- "architecture": node.skill_agent.architecture
- }
- elif isinstance(node, ConfigurableAgentNode):
- configurable_nodes[node_id] = {
- "name": node.name,
- "config": node.agent_config,
- "has_individual_model": node.model is not None
- }
-
- return {
- "dag_name": self.current_dag.name,
- "total_nodes": len(self.current_dag.nodes),
- "skill_nodes": skill_nodes,
- "configurable_nodes": configurable_nodes,
- "edges": self.current_dag.edges,
- "conditional_edges": list(self.current_dag.conditional_edges.keys()),
- "registered_skills": self.skill_registry.list_agents() if self.skill_registry else []
- }
diff --git a/src/talos/dag/extensible_nodes.py b/src/talos/dag/extensible_nodes.py
deleted file mode 100644
index 374767cd..00000000
--- a/src/talos/dag/extensible_nodes.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, Optional, TYPE_CHECKING
-
-from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AIMessage
-from pydantic import ConfigDict
-
-from talos.dag.nodes import DAGNode, GraphState
-
-if TYPE_CHECKING:
- from talos.core.extensible_agent import SupportAgent
-
-
-class ExtensibleSkillNode(DAGNode):
- """
- Enhanced skill node that supports individual configurations and chat capabilities.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- skill_agent: "SupportAgent"
- node_type: str = "extensible_skill"
-
- def execute(self, state: GraphState) -> GraphState:
- """Execute the skill agent with enhanced capabilities."""
- query = state["current_query"]
- context = state.get("context", {})
-
- enhanced_context = self.skill_agent.analyze_task(query, context)
-
- enhanced_context.update({
- "current_query": query,
- "messages": state.get("messages", []),
- "results": state.get("results", {}),
- "metadata": state.get("metadata", {})
- })
-
- result = self.skill_agent.execute_task(enhanced_context)
-
- state["results"][self.node_id] = result
- state["messages"].append(
- AIMessage(content=f"Extensible skill {self.name} executed: {str(result)[:100]}...")
- )
-
- state["metadata"][f"{self.node_id}_config"] = {
- "domain": self.skill_agent.domain,
- "architecture": self.skill_agent.architecture,
- "skills_count": len(self.skill_agent.skills)
- }
-
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- """Return enhanced configuration for serialization."""
- base_config = {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "skill_name": self.skill_agent.name,
- "metadata": self.metadata
- }
-
- base_config["skill_agent_config"] = {
- "domain": self.skill_agent.domain,
- "architecture": self.skill_agent.architecture,
- "delegation_keywords": self.skill_agent.delegation_keywords,
- "has_individual_model": self.skill_agent.model is not None,
- "skill_description": self.skill_agent.description
- }
-
- return base_config
-
-
-class ConfigurableAgentNode(DAGNode):
- """
- Agent node that can be configured with different models and settings.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- agent_config: Dict[str, Any]
- model: Optional[BaseChatModel] = None
- node_type: str = "configurable_agent"
-
- def execute(self, state: GraphState) -> GraphState:
- """Execute with configurable agent settings."""
- query = state["current_query"]
-
- model = self.model
- if not model:
- from langchain_openai import ChatOpenAI
- model = ChatOpenAI(model="gpt-4o-mini")
-
- try:
- from langchain_core.messages import HumanMessage
- response = model.invoke([HumanMessage(content=f"Process this query: {query}")])
- result = response.content
- except Exception as e:
- result = f"Error in configurable agent: {str(e)}"
-
- state["results"][self.node_id] = result
- state["messages"].append(
- AIMessage(content=f"Configurable agent {self.name} processed: {query}")
- )
-
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- return {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "agent_config": self.agent_config,
- "has_individual_model": self.model is not None,
- "metadata": self.metadata
- }
diff --git a/src/talos/dag/graph.py b/src/talos/dag/graph.py
deleted file mode 100644
index da6f0db3..00000000
--- a/src/talos/dag/graph.py
+++ /dev/null
@@ -1,181 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import Any, Dict, List, Optional
-
-from langgraph.graph import StateGraph, START, END
-from langgraph.checkpoint.memory import MemorySaver
-from pydantic import BaseModel, ConfigDict
-
-from talos.dag.nodes import DAGNode, GraphState
-
-
-class TalosDAG(BaseModel):
- """Main DAG class that manages the LangGraph StateGraph."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- name: str
- description: Optional[str] = None
- nodes: Dict[str, DAGNode] = {}
- edges: List[tuple[str, str]] = []
- conditional_edges: Dict[str, Dict[str, str]] = {}
- graph: Optional[StateGraph] = None
- compiled_graph: Optional[Any] = None
- checkpointer: Optional[MemorySaver] = None
-
- def add_node(self, node: DAGNode) -> None:
- """Add a node to the DAG."""
- self.nodes[node.node_id] = node
- self._rebuild_graph()
-
- def remove_node(self, node_id: str) -> bool:
- """Remove a node from the DAG."""
- if node_id in self.nodes:
- del self.nodes[node_id]
- self.edges = [(src, dst) for src, dst in self.edges if src != node_id and dst != node_id]
- self.conditional_edges = {k: v for k, v in self.conditional_edges.items() if k != node_id}
- self._rebuild_graph()
- return True
- return False
-
- def add_edge(self, source: str, destination: str) -> None:
- """Add a direct edge between two nodes."""
- self.edges.append((source, destination))
- self._rebuild_graph()
-
- def add_conditional_edge(self, source: str, conditions: Dict[str, str]) -> None:
- """Add conditional edges from a source node."""
- self.conditional_edges[source] = conditions
- self._rebuild_graph()
-
- def _rebuild_graph(self) -> None:
- """Rebuild the LangGraph StateGraph from current nodes and edges."""
- if not self.nodes:
- return
-
- self.graph = StateGraph(GraphState)
-
- for node_id, node in self.nodes.items():
- self.graph.add_node(node_id, node.execute)
-
- for source, destination in self.edges:
- if source in self.nodes and destination in self.nodes:
- self.graph.add_edge(source, destination)
-
- for source, conditions in self.conditional_edges.items():
- if source in self.nodes:
- def route_function(state: GraphState) -> str:
- next_node = state.get("context", {}).get("next_node", "default")
- return conditions.get(next_node, END)
-
- self.graph.add_conditional_edges(
- source,
- route_function,
- list(conditions.values())
- )
-
- if self.nodes:
- first_node = next(iter(self.nodes.keys()))
- self.graph.add_edge(START, first_node)
-
- self.checkpointer = MemorySaver()
- self.compiled_graph = self.graph.compile(checkpointer=self.checkpointer)
-
- def execute(self, initial_state: GraphState, thread_id: str = "default") -> GraphState:
- """Execute the DAG with the given initial state and thread ID for memory."""
- if not self.compiled_graph:
- self._rebuild_graph()
-
- if not self.compiled_graph:
- raise ValueError("No compiled graph available for execution")
-
- config = {"configurable": {"thread_id": thread_id}}
- result = self.compiled_graph.invoke(initial_state, config=config)
- return result
-
- def get_graph_config(self) -> Dict[str, Any]:
- """Get the complete graph configuration for serialization."""
- return {
- "name": self.name,
- "description": self.description,
- "nodes": {node_id: node.get_node_config() for node_id, node in self.nodes.items()},
- "edges": self.edges,
- "conditional_edges": self.conditional_edges,
- "metadata": {
- "node_count": len(self.nodes),
- "edge_count": len(self.edges),
- "conditional_edge_count": len(self.conditional_edges)
- }
- }
-
- def serialize_to_json(self) -> str:
- """Serialize the DAG configuration to JSON for on-chain storage."""
- config = self.get_graph_config()
- return json.dumps(config, indent=2)
-
- def serialize_for_blockchain(self) -> Dict[str, Any]:
- """Serialize DAG for blockchain storage with deterministic ordering."""
- config = self.get_graph_config()
-
- sorted_nodes = dict(sorted(config["nodes"].items()))
- sorted_edges = sorted(config["edges"])
- sorted_conditional_edges = dict(sorted(config["conditional_edges"].items()))
-
- blockchain_config = {
- "dag_version": "1.0.0",
- "name": self.name,
- "description": self.description,
- "nodes": sorted_nodes,
- "edges": sorted_edges,
- "conditional_edges": sorted_conditional_edges,
- "metadata": config["metadata"],
- "checksum": self._calculate_dag_checksum(sorted_nodes, sorted_edges)
- }
-
- return blockchain_config
-
- def _calculate_dag_checksum(self, nodes: Dict[str, Any], edges: List[tuple]) -> str:
- """Calculate deterministic checksum for DAG state."""
- import hashlib
- dag_data = {
- "nodes": nodes,
- "edges": edges
- }
- dag_json = json.dumps(dag_data, sort_keys=True)
- return hashlib.sha256(dag_json.encode()).hexdigest()
-
- def validate_upgrade_compatibility(self, new_node_config: Dict[str, Any]) -> bool:
- """Validate if a node upgrade is compatible with current DAG."""
- node_id = new_node_config.get("node_id")
- if not node_id or node_id not in self.nodes:
- return False
-
- current_node = self.nodes[node_id]
- if hasattr(current_node, 'node_version') and hasattr(current_node, 'upgrade_policy'):
- return True
-
- return False
-
- def visualize_graph(self) -> str:
- """Return a text representation of the graph structure."""
- lines = [f"DAG: {self.name}"]
- if self.description:
- lines.append(f"Description: {self.description}")
-
- lines.append("\nNodes:")
- for node_id, node in self.nodes.items():
- lines.append(f" - {node_id} ({node.node_type}): {node.name}")
-
- lines.append("\nEdges:")
- for source, destination in self.edges:
- lines.append(f" - {source} -> {destination}")
-
- if self.conditional_edges:
- lines.append("\nConditional Edges:")
- for source, conditions in self.conditional_edges.items():
- lines.append(f" - {source}:")
- for condition, target in conditions.items():
- lines.append(f" - {condition} -> {target}")
-
- return "\n".join(lines)
diff --git a/src/talos/dag/manager.py b/src/talos/dag/manager.py
deleted file mode 100644
index b6a3920e..00000000
--- a/src/talos/dag/manager.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, List, Optional
-
-from langchain_core.language_models import BaseChatModel
-from pydantic import BaseModel, ConfigDict
-
-from talos.dag.graph import TalosDAG
-from talos.dag.nodes import (
- SkillNode, ServiceNode, ToolNode,
- DataSourceNode, PromptNode, RouterNode, GraphState
-)
-from talos.data.dataset_manager import DatasetManager
-from talos.prompts.prompt_manager import PromptManager
-from talos.services.abstract.service import Service
-from talos.skills.base import Skill
-from talos.tools.tool_manager import ToolManager
-
-
-class DAGManager(BaseModel):
- """Manages DAG creation, modification, and execution."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- current_dag: Optional[TalosDAG] = None
- dag_history: List[TalosDAG] = []
-
- def create_default_dag(
- self,
- model: BaseChatModel,
- prompt_manager: PromptManager,
- skills: List[Skill],
- services: List[Service],
- tool_manager: ToolManager,
- dataset_manager: Optional[DatasetManager] = None
- ) -> TalosDAG:
- """Create a default DAG from existing Talos components."""
- dag = TalosDAG(
- name="talos_default_dag",
- description="Default Talos agent DAG with integrated skills, services, and tools"
- )
-
- from talos.prompts.prompt_config import PromptConfig, StaticPromptSelector
-
- legacy_config = PromptConfig(
- selector=StaticPromptSelector(
- prompt_names=["main_agent_prompt", "general_agent_prompt"]
- )
- )
-
- prompt_node = PromptNode(
- node_id="main_prompt",
- name="Main Agent Prompt",
- description="Primary prompt for the Talos agent",
- prompt_manager=prompt_manager,
- prompt_config=legacy_config
- )
- dag.add_node(prompt_node)
-
- routing_logic = {
- "proposal": "proposals_skill",
- "twitter": "twitter_sentiment_skill",
- "github": "pr_review_skill",
- "crypto": "cryptography_skill",
- "sentiment": "twitter_sentiment_skill",
- "review": "pr_review_skill"
- }
- router_node = RouterNode(
- node_id="main_router",
- name="Main Router",
- description="Routes queries to appropriate skills",
- routing_logic=routing_logic
- )
- dag.add_node(router_node)
-
- if dataset_manager:
- data_node = DataSourceNode(
- node_id="dataset_source",
- name="Dataset Manager",
- description="Provides relevant documents and context",
- data_source=dataset_manager
- )
- dag.add_node(data_node)
- dag.add_edge("main_prompt", "dataset_source")
- dag.add_edge("dataset_source", "main_router")
- else:
- dag.add_edge("main_prompt", "main_router")
-
- for skill in skills:
- skill_node = SkillNode(
- node_id=f"{skill.name}_skill",
- name=f"{skill.name.title()} Skill",
- description=f"Skill for {skill.name} operations",
- skill=skill
- )
- dag.add_node(skill_node)
-
- for service in services:
- service_node = ServiceNode(
- node_id=f"{service.name}_service",
- name=f"{service.name.title()} Service",
- description=f"Service for {service.name} operations",
- service=service
- )
- dag.add_node(service_node)
-
- if tool_manager.tools:
- tools_list = list(tool_manager.tools.values())
- tool_node = ToolNode(
- node_id="tools",
- name="Tools",
- description="LangGraph tools for various operations",
- tools=tools_list
- )
- dag.add_node(tool_node)
-
- conditional_targets = {}
- for keyword, target in routing_logic.items():
- if target in [node.node_id for node in dag.nodes.values()]:
- conditional_targets[target] = target
-
- if conditional_targets:
- dag.add_conditional_edge("main_router", conditional_targets)
-
- self.current_dag = dag
- return dag
-
- def execute_dag(self, query: str, context: Optional[Dict[str, Any]] = None, thread_id: str = "default") -> GraphState:
- """Execute the current DAG with a query."""
- if not self.current_dag:
- raise ValueError("No DAG available for execution")
-
- initial_state: GraphState = {
- "messages": [],
- "context": context or {},
- "current_query": query,
- "results": {},
- "metadata": {"dag_name": self.current_dag.name}
- }
-
- return self.current_dag.execute(initial_state, thread_id=thread_id)
-
- def get_dag_visualization(self) -> str:
- """Get a text visualization of the current DAG."""
- if not self.current_dag:
- return "No DAG available"
-
- return self.current_dag.visualize_graph()
-
- def serialize_dag_for_chain(self) -> str:
- """Serialize the current DAG for on-chain storage."""
- if not self.current_dag:
- return "{}"
-
- return self.current_dag.serialize_to_json()
-
- def rollback_to_previous_dag(self) -> bool:
- """Rollback to the previous DAG version."""
- if not self.dag_history:
- return False
-
- self.current_dag = self.dag_history.pop()
- return True
diff --git a/src/talos/dag/nodes.py b/src/talos/dag/nodes.py
deleted file mode 100644
index bc8ac8fd..00000000
--- a/src/talos/dag/nodes.py
+++ /dev/null
@@ -1,301 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, TypedDict, TYPE_CHECKING
-
-from langchain_core.tools import BaseTool
-from langchain_core.messages import BaseMessage
-from langgraph.prebuilt import ToolNode as LangGraphToolNode
-from pydantic import BaseModel, ConfigDict
-
-from talos.core.agent import Agent
-from talos.data.dataset_manager import DatasetManager
-from talos.prompts.prompt_manager import PromptManager
-from talos.services.abstract.service import Service
-from talos.skills.base import Skill
-
-if TYPE_CHECKING:
- from talos.prompts.prompt_config import PromptConfig
-else:
- PromptConfig = "PromptConfig"
-
-
-class GraphState(TypedDict):
- """State that flows through the DAG nodes."""
- messages: List[BaseMessage]
- context: Dict[str, Any]
- current_query: str
- results: Dict[str, Any]
- metadata: Dict[str, Any]
-
-
-class DAGNode(BaseModel, ABC):
- """Abstract base class for all DAG nodes."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- node_id: str
- node_type: str
- name: str
- description: Optional[str] = None
- metadata: Dict[str, Any] = {}
-
- @abstractmethod
- def execute(self, state: GraphState) -> GraphState:
- """Execute the node's functionality and return updated state."""
- pass
-
- @abstractmethod
- def get_node_config(self) -> Dict[str, Any]:
- """Return configuration for serialization."""
- pass
-
-
-class AgentNode(DAGNode):
- """Node that wraps an Agent for execution in the DAG."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- agent: Agent
- node_type: str = "agent"
-
- def execute(self, state: GraphState) -> GraphState:
- """Execute the agent with the current query."""
- query = state["current_query"]
- result = self.agent.run(query)
-
- state["results"][self.node_id] = result
- from langchain_core.messages import AIMessage
- state["messages"].append(AIMessage(content=f"Agent {self.name} processed: {query}"))
-
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- return {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "agent_type": type(self.agent).__name__,
- "metadata": self.metadata
- }
-
-
-class SkillNode(DAGNode):
- """Node that wraps a Skill for execution in the DAG."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- skill: Skill
- node_type: str = "skill"
-
- def execute(self, state: GraphState) -> GraphState:
- """Execute the skill with parameters from state."""
- context = state.get("context", {})
- result = self.skill.run(**context)
-
- state["results"][self.node_id] = result
- from langchain_core.messages import AIMessage
- state["messages"].append(AIMessage(content=f"Skill {self.name} executed"))
-
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- return {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "skill_name": self.skill.name,
- "metadata": self.metadata
- }
-
-
-class ServiceNode(DAGNode):
- """Node that wraps a Service for execution in the DAG."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- service: Service
- node_type: str = "service"
-
- def execute(self, state: GraphState) -> GraphState:
- """Execute the service with parameters from state."""
- state["results"][self.node_id] = f"Service {self.service.name} executed"
- from langchain_core.messages import AIMessage
- state["messages"].append(AIMessage(content=f"Service {self.name} processed"))
-
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- return {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "service_name": self.service.name,
- "metadata": self.metadata
- }
-
-
-class ToolNode(DAGNode):
- """Node that wraps LangGraph's ToolNode for execution in the DAG."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- tools: List[BaseTool]
- node_type: str = "tool"
- _langgraph_tool_node: Optional[LangGraphToolNode] = None
-
- def __init__(self, **data):
- super().__init__(**data)
- if self.tools:
- self._langgraph_tool_node = LangGraphToolNode(self.tools)
-
- def execute(self, state: GraphState) -> GraphState:
- """Execute the tools using LangGraph's ToolNode."""
- if not self._langgraph_tool_node:
- state["results"][self.node_id] = "Error: No tools configured"
- return state
-
- try:
- result = self._langgraph_tool_node.invoke(state)
- state.update(result)
- state["results"][self.node_id] = "Tools executed successfully"
- except Exception as e:
- state["results"][self.node_id] = f"Error: {str(e)}"
-
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- return {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "tool_count": len(self.tools) if self.tools else 0
- }
-
-
-class DataSourceNode(DAGNode):
- """Node that provides data from various sources."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- data_source: Any
- node_type: str = "data_source"
-
- def execute(self, state: GraphState) -> GraphState:
- """Retrieve data from the data source."""
- query = state["current_query"]
-
- if isinstance(self.data_source, DatasetManager):
- result = self.data_source.search(query, k=5)
- state["results"][self.node_id] = result
- state["context"]["relevant_documents"] = result
- else:
- state["results"][self.node_id] = f"Data from {self.name}"
-
- from langchain_core.messages import AIMessage
- state["messages"].append(AIMessage(content=f"Data source {self.name} provided data"))
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- return {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "data_source_type": type(self.data_source).__name__,
- "metadata": self.metadata
- }
-
-
-class PromptNode(DAGNode):
- """Node that manages prompts and prompt templates."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- prompt_manager: PromptManager
- prompt_names: Optional[List[str]] = None
- prompt_config: Optional["PromptConfig"] = None
- node_type: str = "prompt"
-
- def __init__(self, **data):
- if not data.get('prompt_names') and not data.get('prompt_config'):
- raise ValueError("Either prompt_names or prompt_config must be provided")
- super().__init__(**data)
-
- def execute(self, state: GraphState) -> GraphState:
- """Apply prompt templates to the current context."""
- if self.prompt_config:
- prompt = self.prompt_manager.get_prompt_with_config(
- self.prompt_config,
- state.get("context", {})
- )
- config_desc = "declarative config"
- else:
- prompt = self.prompt_manager.get_prompt(self.prompt_names or [])
- config_desc = f"prompt names: {', '.join(self.prompt_names or [])}"
-
- if prompt:
- state["context"]["active_prompt"] = prompt.template
- state["results"][self.node_id] = f"Applied prompt using {config_desc}"
- else:
- state["results"][self.node_id] = f"Failed to load prompt using {config_desc}"
-
- from langchain_core.messages import AIMessage
- state["messages"].append(AIMessage(content=f"Prompt node {self.name} processed"))
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- config = {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "metadata": self.metadata
- }
-
- if self.prompt_config:
- config["prompt_config"] = "declarative"
- else:
- config["prompt_names"] = ", ".join(self.prompt_names) if self.prompt_names else None
-
- return config
-
-
-class RouterNode(DAGNode):
- """Node that routes execution to different paths based on conditions."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- routing_logic: Dict[str, str]
- node_type: str = "router"
-
- def execute(self, state: GraphState) -> GraphState:
- """Determine the next node based on routing logic."""
- query = state["current_query"].lower()
-
- next_node = None
- for keyword, target_node in self.routing_logic.items():
- if keyword in query:
- next_node = target_node
- break
-
- state["context"]["next_node"] = next_node or "default"
- state["results"][self.node_id] = f"Routed to: {next_node or 'default'}"
- from langchain_core.messages import AIMessage
- state["messages"].append(AIMessage(content=f"Router {self.name} determined next path"))
-
- return state
-
- def get_node_config(self) -> Dict[str, Any]:
- return {
- "node_id": self.node_id,
- "node_type": self.node_type,
- "name": self.name,
- "description": self.description,
- "routing_logic": self.routing_logic,
- "metadata": self.metadata
- }
diff --git a/src/talos/dag/proposal_skill.py b/src/talos/dag/proposal_skill.py
deleted file mode 100644
index 8b137891..00000000
--- a/src/talos/dag/proposal_skill.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/talos/dag/structured_manager.py b/src/talos/dag/structured_manager.py
deleted file mode 100644
index 71bee0e8..00000000
--- a/src/talos/dag/structured_manager.py
+++ /dev/null
@@ -1,448 +0,0 @@
-from __future__ import annotations
-
-import hashlib
-import json
-from typing import Any, Dict, List, Optional
-
-from langchain_core.language_models import BaseChatModel
-from pydantic import ConfigDict
-
-from talos.dag.graph import TalosDAG
-from talos.dag.manager import DAGManager
-from talos.dag.nodes import PromptNode, DataSourceNode, ToolNode
-from talos.dag.structured_nodes import StructuredSupportAgentNode, StructuredRouterNode, NodeVersion
-from talos.data.dataset_manager import DatasetManager
-from talos.prompts.prompt_manager import PromptManager
-from talos.services.abstract.service import Service
-from talos.tools.tool_manager import ToolManager
-
-pass
-
-
-class StructuredDAGManager(DAGManager):
- """
- Manager for structured DAGs with controlled node upgrades and blockchain-native capabilities.
-
- This class extends the base DAGManager to provide deterministic DAG construction,
- versioned node management, and blockchain-compatible serialization. It's designed
- to enable individual component upgrades in a distributed AI system while maintaining
- deterministic behavior and upgrade safety.
-
- Key Features:
- - Controlled node upgrade methodology with version validation
- - Deterministic DAG structure creation and management
- - Blockchain-native serialization with reproducible hashing
- - Individual node rollback capabilities
- - Upgrade policy enforcement and compatibility checking
-
- Blockchain-Native Design:
- - All operations produce deterministic, reproducible results
- - DAG structure is serialized with consistent ordering
- - Node upgrades are validated and logged for auditability
- - Delegation patterns use hash-based verification
- - Export format is suitable for on-chain storage
-
- The manager maintains a registry of StructuredSupportAgentNode instances,
- each with semantic versioning and upgrade policies. It ensures that all
- DAG modifications follow controlled upgrade paths and maintain system integrity.
-
- Attributes:
- node_registry: Registry of versioned support agent nodes
- dag_version: Current version of the DAG structure
- delegation_hash: Hash of current delegation rules
-
- Examples:
- >>> manager = StructuredDAGManager()
- >>> agent = SupportAgent(name="governance", domain="governance", ...)
- >>> manager.add_support_agent(agent, NodeVersion(1, 0, 0))
- >>> dag = manager.create_structured_dag(...)
- >>> manager.upgrade_node("governance", new_agent, NodeVersion(1, 1, 0))
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- node_registry: Dict[str, StructuredSupportAgentNode] = {}
- delegation_hash: str = ""
- dag_version: str = "1.0.0"
-
- def create_structured_dag(
- self,
- model: BaseChatModel,
- prompt_manager: PromptManager,
- support_agents: Dict[str, Any],
- services: List[Service],
- tool_manager: ToolManager,
- dataset_manager: Optional[DatasetManager] = None,
- dag_name: str = "structured_talos_dag"
- ) -> TalosDAG:
- """
- Create a structured DAG with controlled node management and deterministic architecture.
-
- This method constructs a blockchain-native DAG with the following structure:
- 1. Router node for deterministic task delegation
- 2. Individual support agent nodes with versioning
- 3. Shared prompt and data source nodes
- 4. Deterministic edge connections
-
- The resulting DAG ensures:
- - Reproducible execution paths
- - Individual node upgrade capabilities
- - Blockchain-compatible serialization
- - Deterministic delegation patterns
-
- Args:
- model: Language model for agent operations
- prompt_manager: Manager for prompt templates
- support_agents: Dictionary of support agents to include as nodes
- services: List of services for the DAG
- tool_manager: Manager for tools and capabilities
- dataset_manager: Optional dataset manager for data source nodes
- dag_name: Unique name for the DAG instance
-
- Returns:
- Configured TalosDAG with structured node architecture
-
- Raises:
- ValueError: If DAG construction fails or validation errors occur
- """
-
- dag = TalosDAG(
- name=dag_name,
- description="Structured Talos agent DAG with blockchain-native node upgrades"
- )
-
- delegation_rules = self._create_deterministic_delegation(support_agents)
- self.delegation_hash = self._calculate_delegation_hash(delegation_rules)
-
- from talos.prompts.prompt_config import PromptConfig, StaticPromptSelector
-
- legacy_config = PromptConfig(
- selector=StaticPromptSelector(
- prompt_names=["main_agent_prompt", "general_agent_prompt"]
- )
- )
-
- prompt_node = PromptNode(
- node_id="main_prompt",
- name="Main Agent Prompt",
- description="Primary prompt for the structured Talos agent",
- prompt_manager=prompt_manager,
- prompt_config=legacy_config
- )
- dag.add_node(prompt_node)
-
- if dataset_manager:
- data_node = DataSourceNode(
- node_id="dataset_source",
- name="Dataset Manager",
- description="Provides relevant documents and context",
- data_source=dataset_manager
- )
- dag.add_node(data_node)
- dag.add_edge("main_prompt", "dataset_source")
-
- router_node = StructuredRouterNode(
- node_id="structured_router",
- name="Structured Router",
- description="Deterministic router with hash-based delegation",
- delegation_rules=delegation_rules
- )
- dag.add_node(router_node)
-
- if dataset_manager:
- dag.add_edge("dataset_source", "structured_router")
- else:
- dag.add_edge("main_prompt", "structured_router")
-
- for domain, agent in support_agents.items():
- structured_node = StructuredSupportAgentNode(
- node_id=f"{domain}_agent",
- name=f"{domain.title()} Agent",
- description=agent.description,
- support_agent=agent,
- node_version=NodeVersion(major=1, minor=0, patch=0)
- )
- dag.add_node(structured_node)
- self.node_registry[domain] = structured_node
-
- if tool_manager.tools:
- tools_list = list(tool_manager.tools.values())
- tool_node = ToolNode(
- node_id="structured_tools",
- name="Structured Tools",
- description="LangGraph tools for structured operations",
- tools=tools_list
- )
- dag.add_node(tool_node)
-
- conditional_targets = {}
- for keyword, target in delegation_rules.items():
- if target in [node.node_id for node in dag.nodes.values()]:
- conditional_targets[target] = target
-
- if conditional_targets:
- dag.add_conditional_edge("structured_router", conditional_targets)
-
- self.current_dag = dag
- return dag
-
- def _create_deterministic_delegation(self, support_agents: Dict[str, Any]) -> Dict[str, str]:
- """Create deterministic delegation rules based on support agents."""
- delegation_rules = {}
-
- for domain, agent in support_agents.items():
- target_node = f"{domain}_agent"
-
- for keyword in agent.delegation_keywords:
- delegation_rules[keyword.lower()] = target_node
-
- for pattern in agent.task_patterns:
- key_words = pattern.lower().split()
- for word in key_words:
- if len(word) > 3:
- delegation_rules[word] = target_node
-
- return dict(sorted(delegation_rules.items()))
-
- def _calculate_delegation_hash(self, delegation_rules: Dict[str, str]) -> str:
- """Calculate deterministic hash for delegation rules."""
- rules_json = json.dumps(delegation_rules, sort_keys=True)
- return hashlib.sha256(rules_json.encode()).hexdigest()[:16]
-
- def upgrade_node(
- self,
- domain: str,
- new_agent: Any,
- new_version: NodeVersion,
- force: bool = False
- ) -> bool:
- """
- Upgrade a specific node with comprehensive version validation.
-
- This method performs a controlled upgrade of an individual DAG node:
- 1. Validates the target node exists and is upgradeable
- 2. Checks version compatibility against upgrade policy
- 3. Creates new node instance with updated configuration
- 4. Replaces old node while preserving DAG structure
- 5. Updates delegation hash and DAG metadata
-
- The upgrade process ensures:
- - No breaking changes to DAG structure
- - Version compatibility enforcement
- - Deterministic hash recalculation
- - Rollback capability preservation
-
- Args:
- domain: Domain identifier of the node to upgrade
- new_agent: Updated support agent configuration
- new_version: Target version for the upgrade
- force: Whether to bypass version compatibility checks
-
- Returns:
- True if upgrade succeeded, False if validation failed
-
- Examples:
- >>> success = manager.upgrade_node(
- ... "governance",
- ... enhanced_governance_agent,
- ... NodeVersion(1, 1, 0)
- ... )
- >>> if success:
- ... print("Upgrade completed successfully")
- """
- if not self.current_dag or domain not in self.node_registry:
- return False
-
- current_node = self.node_registry[domain]
-
- if not force and not current_node.can_upgrade_to(new_version):
- return False
-
- old_node_id = current_node.node_id
-
- new_node = StructuredSupportAgentNode(
- node_id=old_node_id,
- name=current_node.name,
- description=new_agent.description,
- support_agent=new_agent,
- node_version=new_version,
- upgrade_policy=current_node.upgrade_policy
- )
-
- if self.current_dag:
- self.current_dag.nodes[old_node_id] = new_node
- self.node_registry[domain] = new_node
-
- if hasattr(self.current_dag, '_rebuild_graph'):
- self.current_dag._rebuild_graph()
-
- return True
-
- def validate_upgrade(self, domain: str, new_version: NodeVersion) -> Dict[str, Any]:
- """
- Validate if a node can be upgraded to the specified version.
-
- This method performs comprehensive upgrade validation:
- 1. Checks if the target node exists in the DAG
- 2. Validates version compatibility against upgrade policy
- 3. Ensures new version is newer than current version
- 4. Checks for potential breaking changes
-
- The validation process helps prevent:
- - Incompatible version upgrades
- - Downgrade attempts
- - Policy violations
- - Breaking changes to DAG structure
-
- Args:
- domain: Domain identifier of the node to validate
- new_version: Proposed version for upgrade
-
- Returns:
- Dictionary containing validation results:
- - "valid": Boolean indicating if upgrade is allowed
- - "reason": Explanation of validation result
- - "current_version": Current node version
- - "upgrade_policy": Current upgrade policy
- - "target_version": Proposed target version
-
- Examples:
- >>> result = manager.validate_upgrade("governance", NodeVersion(2, 0, 0))
- >>> if not result["valid"]:
- ... print(f"Upgrade blocked: {result['reason']}")
- """
- if domain not in self.node_registry:
- return {"valid": False, "reason": "Node not found"}
-
- current_node = self.node_registry[domain]
- can_upgrade = current_node.can_upgrade_to(new_version)
-
- return {
- "valid": can_upgrade,
- "current_version": str(current_node.node_version),
- "target_version": str(new_version),
- "upgrade_policy": current_node.upgrade_policy,
- "reason": "Compatible upgrade" if can_upgrade else "Incompatible version"
- }
-
- def rollback_node(self, domain: str, target_version: NodeVersion) -> bool:
- """
- Rollback a node to a previous version with safety validation.
-
- This method enables controlled rollback of individual nodes:
- 1. Validates the target node exists and supports rollback
- 2. Checks that target version is older than current version
- 3. Creates rollback node instance with previous configuration
- 4. Replaces current node while preserving DAG structure
- 5. Updates delegation hash and DAG metadata
-
- Rollback Safety:
- - Only allows rollback to older versions
- - Preserves DAG structural integrity
- - Maintains deterministic behavior
- - Updates all relevant hashes and metadata
-
- Args:
- domain: Domain identifier of the node to rollback
- target_version: Previous version to rollback to
-
- Returns:
- True if rollback succeeded, False if validation failed
-
- Examples:
- >>> success = manager.rollback_node("governance", NodeVersion(1, 0, 0))
- >>> if success:
- ... print("Rollback completed successfully")
- """
- if domain not in self.node_registry:
- return False
-
- current_node = self.node_registry[domain]
-
- if target_version.is_newer_than(current_node.node_version):
- return False
-
- rollback_node = StructuredSupportAgentNode(
- node_id=current_node.node_id,
- name=current_node.name,
- description=current_node.description,
- support_agent=current_node.support_agent,
- node_version=target_version,
- upgrade_policy=current_node.upgrade_policy
- )
-
- if self.current_dag:
- self.current_dag.nodes[current_node.node_id] = rollback_node
- self.node_registry[domain] = rollback_node
-
- if hasattr(self.current_dag, '_rebuild_graph'):
- self.current_dag._rebuild_graph()
-
- return True
-
- def get_structured_dag_status(self) -> Dict[str, Any]:
- """
- Get comprehensive status of the structured DAG and all its components.
-
- This method provides detailed information about the current DAG state:
- - Overall DAG metadata (name, version, node count)
- - Individual node status (version, hash, upgrade policy)
- - Delegation configuration and hash verification
- - Edge and conditional edge mappings
- - Blockchain readiness indicators
-
- The status information is useful for:
- - Monitoring DAG health and configuration
- - Debugging delegation and routing issues
- - Verifying blockchain compatibility
- - Planning upgrades and maintenance
-
- Returns:
- Dictionary containing comprehensive DAG status:
- - "dag_name": Name of the current DAG
- - "dag_version": Current DAG version
- - "total_nodes": Number of nodes in the DAG
- - "structured_nodes": Detailed node information
- - "delegation_hash": Current delegation hash
- - "edges": DAG edge configuration
- - "conditional_edges": Conditional routing rules
- - "blockchain_ready": Blockchain compatibility status
-
- Examples:
- >>> status = manager.get_structured_dag_status()
- >>> print(f"DAG has {status['total_nodes']} nodes")
- >>> for node_id, info in status['structured_nodes'].items():
- ... print(f"{node_id}: v{info['version']}")
- """
- if not self.current_dag:
- return {"status": "No DAG available"}
-
- structured_nodes = {}
-
- for node_id, node in self.current_dag.nodes.items():
- if isinstance(node, StructuredSupportAgentNode):
- structured_nodes[node_id] = {
- "name": node.name,
- "domain": node.support_agent.domain,
- "version": str(node.node_version),
- "node_hash": node.node_hash,
- "upgrade_policy": node.upgrade_policy
- }
-
- return {
- "dag_name": self.current_dag.name,
- "dag_version": self.dag_version,
- "total_nodes": len(self.current_dag.nodes),
- "structured_nodes": structured_nodes,
- "delegation_hash": self.delegation_hash,
- "edges": self.current_dag.edges,
- "conditional_edges": list(self.current_dag.conditional_edges.keys()),
- "blockchain_ready": True
- }
-
- def export_for_blockchain(self) -> Dict[str, Any]:
- """Export DAG configuration for blockchain storage."""
- if not self.current_dag:
- return {}
-
- return self.current_dag.serialize_for_blockchain()
diff --git a/src/talos/dag/structured_nodes.py b/src/talos/dag/structured_nodes.py
deleted file mode 100644
index c3aba2cf..00000000
--- a/src/talos/dag/structured_nodes.py
+++ /dev/null
@@ -1,410 +0,0 @@
-from __future__ import annotations
-
-import hashlib
-from typing import Any, Dict
-
-from langchain_core.messages import AIMessage
-from pydantic import BaseModel, ConfigDict
-
-from talos.dag.nodes import DAGNode, GraphState
-
-pass
-
-
class NodeVersion(BaseModel):
    """
    Semantic version (major.minor.patch) for structured DAG nodes.

    Implements the subset of semver needed for deterministic,
    blockchain-native upgrade validation: string rendering, compatibility
    checks (same major version), and precedence comparison.

    Attributes:
        major: Major version number (breaking changes)
        minor: Minor version number (backward-compatible features)
        patch: Patch version number (backward-compatible bug fixes)

    Examples:
        >>> v1 = NodeVersion(major=1, minor=0, patch=0)
        >>> v2 = NodeVersion(major=1, minor=1, patch=0)
        >>> v1.is_compatible_with(v2)  # True - same major version
        >>> v2.is_newer_than(v1)       # True - higher minor version
    """
    major: int
    minor: int
    patch: int

    def __str__(self) -> str:
        """Render as canonical semver text, e.g. ``"1.2.3"``."""
        return f"{self.major}.{self.minor}.{self.patch}"

    def is_compatible_with(self, other: "NodeVersion") -> bool:
        """
        Report whether the two versions share a major version.

        Compatibility is defined purely by major-version equality, so that
        breaking changes (major bumps) are detected during upgrades.

        Args:
            other: The version to compare against

        Returns:
            True when both versions have the same major number.
        """
        return self.major == other.major

    def is_newer_than(self, other: "NodeVersion") -> bool:
        """
        Report whether this version strictly precedes ``other`` in semver order.

        Uses lexicographic comparison of the (major, minor, patch) tuples,
        which is exactly semver precedence: major first, then minor, then
        patch.

        Args:
            other: The version to compare against

        Returns:
            True when this version is strictly newer than ``other``.
        """
        mine = (self.major, self.minor, self.patch)
        theirs = (other.major, other.minor, other.patch)
        return mine > theirs
-
-
class StructuredSupportAgentNode(DAGNode):
    """
    Structured support agent node with versioning and upgrade capabilities.

    This class represents a blockchain-native DAG node that wraps a SupportAgent
    with deterministic versioning, upgrade policies, and hash-based identification.
    It's designed to enable individual component upgrades in a distributed AI system.

    Key Features:
    - Semantic versioning with upgrade policy enforcement
    - Deterministic hashing for blockchain compatibility
    - Individual node upgrade capabilities
    - Reproducible serialization for on-chain storage

    Upgrade Policies:
    - "compatible": Only allows upgrades to newer versions within same major version
    - "exact": Requires exact version matches (no upgrades)
    - "any": Allows any version change, including downgrades (see can_upgrade_to)

    Attributes:
        support_agent: The wrapped SupportAgent instance
        node_version: Semantic version of this node
        upgrade_policy: Policy governing allowed upgrades
        node_hash: Deterministic hash for blockchain identification

    Examples:
        >>> agent = SupportAgent(name="governance", domain="governance", ...)
        >>> node = StructuredSupportAgentNode(
        ...     node_id="gov_node",
        ...     name="Governance Node",
        ...     support_agent=agent,
        ...     node_version=NodeVersion(1, 0, 0),
        ...     upgrade_policy="compatible"
        ... )
        >>> node.can_upgrade_to(NodeVersion(1, 1, 0))  # True
        >>> node.can_upgrade_to(NodeVersion(2, 0, 0))  # False
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Typed Any to avoid importing the concrete SupportAgent class here.
    support_agent: Any
    node_version: NodeVersion
    node_type: str = "structured_support_agent"
    upgrade_policy: str = "compatible"
    node_hash: str = ""  # populated by __init__ via _calculate_node_hash()

    def __init__(self, **data: Any) -> None:
        # Derive the hash only after pydantic has populated all fields.
        super().__init__(**data)
        self.node_hash = self._calculate_node_hash()

    def _calculate_node_hash(self) -> str:
        """
        Calculate a deterministic identity hash for this node.

        Builds a dict of identifying properties, serializes it with
        ``json.dumps(..., sort_keys=True)`` so key order is canonical, and
        returns the first 16 hex characters of the SHA-256 digest.

        The hashed data contains exactly:
        - "node_id" and "node_type"
        - "version" (string form of ``node_version``)
        - the support agent's "domain" and "architecture" (empty defaults
          when the agent does not expose those attributes)

        Returns:
            Truncated (16-character) hexadecimal SHA-256 digest.
        """
        node_data = {
            "node_id": self.node_id,
            "node_type": self.node_type,
            "version": str(self.node_version),
            "domain": getattr(self.support_agent, 'domain', ''),
            "architecture": getattr(self.support_agent, 'architecture', {})
        }
        import json
        node_json = json.dumps(node_data, sort_keys=True)
        return hashlib.sha256(node_json.encode()).hexdigest()[:16]

    def execute(self, state: GraphState) -> GraphState:
        """
        Execute the wrapped support agent against the current state.

        Reads ``state["current_query"]``, injects this node's version, id,
        and hash into the shared context, runs the agent's
        ``analyze_task`` -> ``execute_task`` pipeline, and records the
        result, a summary AIMessage, and per-node execution metadata back
        onto the state.

        Args:
            state: Current graph state containing query, context, and results

        Returns:
            The same state object, updated in place with execution results.
        """
        query = state["current_query"]
        context = state.get("context", {})

        # Expose node identity to the agent for provenance/verification.
        context["node_version"] = str(self.node_version)
        context["node_id"] = self.node_id
        context["node_hash"] = self.node_hash

        enhanced_context = self.support_agent.analyze_task(query, context)
        result = self.support_agent.execute_task(enhanced_context)

        state["results"][self.node_id] = result
        state["messages"].append(
            AIMessage(content=f"Structured agent {self.name} v{self.node_version} executed: {str(result)[:100]}...")
        )

        state["metadata"][f"{self.node_id}_execution"] = {
            "version": str(self.node_version),
            "domain": self.support_agent.domain,
            "architecture": self.support_agent.architecture,
            "node_hash": self.node_hash
        }

        return state

    def get_node_config(self) -> Dict[str, Any]:
        """
        Return this node's configuration for serialization.

        NOTE(review): no sorting is applied here — the returned dict
        preserves insertion order.  Deterministic ordering for blockchain
        storage must come from the downstream serializer (e.g. json with
        sort_keys); confirm against the export path.

        Returns:
            Dictionary with node identity and metadata, version and
            upgrade-policy information, the node hash, and the wrapped
            agent's configuration (domain, architecture, delegation
            keywords, task patterns).
        """
        return {
            "node_id": self.node_id,
            "node_type": self.node_type,
            "name": self.name,
            "description": self.description,
            "version": str(self.node_version),
            "upgrade_policy": self.upgrade_policy,
            "node_hash": self.node_hash,
            "support_agent_config": {
                "domain": self.support_agent.domain,
                "architecture": self.support_agent.architecture,
                "delegation_keywords": self.support_agent.delegation_keywords,
                "task_patterns": self.support_agent.task_patterns
            },
            "metadata": self.metadata
        }

    def can_upgrade_to(self, new_version: NodeVersion) -> bool:
        """
        Check if this node can be upgraded to the specified version.

        Upgrade Policy Enforcement:
        - "compatible": allows only strictly newer versions within the same
          major version
        - "exact": allows only an exact version match (no real upgrades)
        - "any": always returns True — any version, including downgrades
        - any other policy string: returns False

        Args:
            new_version: Target version for potential upgrade

        Returns:
            True if the transition is allowed by the current policy.

        Examples:
            >>> node.upgrade_policy = "compatible"
            >>> node.node_version = NodeVersion(1, 0, 0)
            >>> node.can_upgrade_to(NodeVersion(1, 1, 0))  # True
            >>> node.can_upgrade_to(NodeVersion(2, 0, 0))  # False
        """
        if self.upgrade_policy == "exact":
            return new_version == self.node_version
        elif self.upgrade_policy == "compatible":
            return new_version.is_compatible_with(self.node_version) and new_version.is_newer_than(self.node_version)
        elif self.upgrade_policy == "any":
            return True
        return False
-
-
class StructuredRouterNode(DAGNode):
    """
    Structured router node with deterministic hash-based delegation.

    Routes incoming queries by substring keyword matching:
    ``delegation_rules`` maps a lowercase keyword to the target node id
    that should handle queries containing that keyword.  The first keyword
    found in the query (scanning rules in dict insertion order) wins; if
    nothing matches, routing falls back to "default".

    Key Features:
    - Keyword-based delegation with a "default" fallback
    - Hash-based verification of the routing rules
    - Blockchain-compatible serialization

    NOTE(review): because rules are scanned in insertion order, a query
    matching several keywords resolves to the first-inserted rule.  For
    fully reproducible routing across processes, construct the rules dict
    in a deterministic order — confirm with callers.

    Attributes:
        delegation_rules: Mapping of keyword -> target node id
        delegation_hash: Deterministic hash of delegation rules

    Examples:
        >>> rules = {
        ...     "proposal": "governance",
        ...     "vote": "governance",
        ...     "metrics": "analytics",
        ... }
        >>> router = StructuredRouterNode(
        ...     node_id="main_router",
        ...     name="Main Router",
        ...     delegation_rules=rules
        ... )
        >>> # Query "analyze governance proposal" routes to "governance"
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # keyword (expected lowercase) -> target node id
    delegation_rules: Dict[str, str]
    delegation_hash: str = ""  # populated by __init__
    node_type: str = "structured_router"

    def __init__(self, **data: Any) -> None:
        # Derive the rules hash only after pydantic has populated all fields.
        super().__init__(**data)
        self.delegation_hash = self._calculate_delegation_hash()

    def _calculate_delegation_hash(self) -> str:
        """
        Calculate a deterministic hash of the delegation rules.

        Serializes ``delegation_rules`` with
        ``json.dumps(..., sort_keys=True)`` (canonical key order) and
        returns the first 16 hex characters of the SHA-256 digest.  The
        hash lets consumers verify that the routing rules have not been
        tampered with and are identical across environments.

        Returns:
            Truncated (16-character) hexadecimal SHA-256 digest.
        """
        import json
        rules_json = json.dumps(self.delegation_rules, sort_keys=True)
        return hashlib.sha256(rules_json.encode()).hexdigest()[:16]

    def execute(self, state: GraphState) -> GraphState:
        """
        Route the query to the appropriate support agent based on keywords.

        Lowercases ``state["current_query"]`` and scans
        ``delegation_rules`` in dict insertion order; the first keyword
        found as a substring of the query selects its target node, with a
        fallback to "default" when nothing matches.  The decision, routing
        metadata (delegation hash, matched keyword, target node), and a
        summary AIMessage are written back onto the state.

        Args:
            state: Current graph state containing the query to route

        Returns:
            The same state object, updated with the routing decision.
        """
        query = state["current_query"].lower()

        next_node = None
        for keyword, target_node in self.delegation_rules.items():
            if keyword in query:
                next_node = target_node
                break

        state["context"]["next_node"] = next_node or "default"
        state["results"][self.node_id] = f"Routed to: {next_node or 'default'}"
        state["metadata"][f"{self.node_id}_routing"] = {
            "delegation_hash": self.delegation_hash,
            # Recomputed scan; agrees with the loop above because dicts
            # preserve insertion order.
            "matched_keyword": next((k for k in self.delegation_rules.keys() if k in query), None),
            "target_node": next_node
        }

        state["messages"].append(
            AIMessage(content=f"Structured router {self.name} determined path: {next_node or 'default'}")
        )

        return state

    def get_node_config(self) -> Dict[str, Any]:
        """
        Return this router's configuration for serialization.

        NOTE(review): no sorting is applied here — ``delegation_rules`` is
        returned as stored (insertion order preserved).  Deterministic
        ordering must come from the downstream serializer (e.g. json with
        sort_keys); confirm against the export path.

        Returns:
            Dictionary with node identity, the delegation rules, the
            delegation hash for rule verification, and node metadata.
        """
        return {
            "node_id": self.node_id,
            "node_type": self.node_type,
            "name": self.name,
            "description": self.description,
            "delegation_rules": self.delegation_rules,
            "delegation_hash": self.delegation_hash,
            "metadata": self.metadata
        }
diff --git a/src/talos/data/dataset_manager.py b/src/talos/data/dataset_manager.py
deleted file mode 100644
index c8c2b30b..00000000
--- a/src/talos/data/dataset_manager.py
+++ /dev/null
@@ -1,261 +0,0 @@
-from __future__ import annotations
-
-import re
-from io import BytesIO
-from typing import Any, Optional, Union
-
-from bs4 import BeautifulSoup
-from langchain_community.vectorstores import FAISS
-from langchain_openai import OpenAIEmbeddings
-from pydantic import BaseModel, ConfigDict, Field
-from pypdf import PdfReader
-
-from talos.tools.ipfs import IpfsTool
-
-
class DatasetManager(BaseModel):
    """
    A class for managing datasets for the Talos agent.

    Stores named collections of text chunks and indexes them in an
    in-memory FAISS vector store for similarity search.  When
    ``use_database`` is True and a ``user_id`` is supplied, every public
    operation is delegated to a ``DatabaseDatasetBackend`` instead of the
    in-memory structures.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # dataset name -> list of text chunks (in-memory mode only)
    datasets: dict[str, Any] = Field(default_factory=dict)
    # Lazily built FAISS index over all chunks (in-memory mode only)
    vector_store: Any = Field(default=None)
    # Embeddings model shared by FAISS and the database backend
    embeddings: Any = Field(default_factory=OpenAIEmbeddings)
    # bool, or int level 0-2; normalized by _get_verbose_level()
    verbose: Union[bool, int] = Field(default=False)
    user_id: Optional[str] = Field(default=None)
    session_id: Optional[str] = Field(default=None)
    use_database: bool = Field(default=False)

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the manager, wiring up the database backend if requested."""
        super().__init__(**kwargs)
        self._db_backend = None

        # Database mode requires a user id and embeddings; otherwise the
        # manager silently stays in in-memory mode.
        if self.use_database and self.user_id and self.embeddings:
            from ..database.dataset_backend import DatabaseDatasetBackend
            self._db_backend = DatabaseDatasetBackend(
                user_id=self.user_id,
                embeddings_model=self.embeddings,
                session_id=self.session_id,
                verbose=self.verbose
            )

    def add_dataset(self, name: str, data: list[str]) -> None:
        """
        Adds a dataset to the DatasetManager.

        Args:
            name: Unique dataset name.  Raises ValueError if a non-empty
                dataset with this name already exists.
            data: Text chunks to store and index; an empty list registers
                the dataset but skips indexing.
        """
        if self._db_backend:
            self._db_backend.add_dataset(name, data)
            return

        # Only a non-empty existing entry blocks re-adding; an empty
        # placeholder can be overwritten.
        if name in self.datasets and self.datasets.get(name):
            raise ValueError(f"Dataset with name '{name}' already exists.")
        self.datasets[name] = data
        if not data:
            if self._get_verbose_level() >= 1:
                print(f"\033[33m⚠️ Dataset '{name}' added but is empty\033[0m")
            return
        if self.vector_store is None:
            self.vector_store = FAISS.from_texts(data, self.embeddings)
        else:
            self.vector_store.add_texts(data)
        verbose_level = self._get_verbose_level()
        if verbose_level >= 1:
            print(f"\033[32m✓ Dataset '{name}' added with {len(data)} chunks\033[0m")
            if verbose_level >= 2:
                print(f" Dataset type: {type(self.vector_store).__name__}")
                print(f" Total datasets: {len(self.datasets)}")

    def remove_dataset(self, name: str) -> None:
        """
        Removes a dataset from the DatasetManager.

        Raises:
            ValueError: If no dataset with this name exists.
        """
        if self._db_backend:
            self._db_backend.remove_dataset(name)
            return

        if name not in self.datasets:
            raise ValueError(f"Dataset with name '{name}' not found.")
        del self.datasets[name]
        # No per-document delete is used here; drop the index and rebuild it
        # from the remaining datasets.
        self.vector_store = None
        self._rebuild_vector_store()

    def _get_verbose_level(self) -> int:
        """Convert verbose to integer level for backward compatibility."""
        if isinstance(self.verbose, bool):
            return 1 if self.verbose else 0
        # Clamp integer levels into the supported range [0, 2].
        return max(0, min(2, self.verbose))

    def _rebuild_vector_store(self) -> None:
        """Rebuild the vector store from all datasets, skipping empty ones."""
        for dataset_name, dataset in self.datasets.items():
            if not dataset:
                continue
            if self.vector_store is None:
                self.vector_store = FAISS.from_texts(dataset, self.embeddings)
            else:
                self.vector_store.add_texts(dataset)

    def get_dataset(self, name: str) -> Any:
        """
        Gets a dataset by name.

        Raises:
            ValueError: If no dataset with this name exists.
        """
        if self._db_backend:
            return self._db_backend.get_dataset(name)

        if name not in self.datasets:
            raise ValueError(f"Dataset with name '{name}' not found.")
        return self.datasets[name]

    def get_all_datasets(self) -> dict[str, Any]:
        """
        Gets all registered datasets.
        """
        if self._db_backend:
            return self._db_backend.get_all_datasets()

        return self.datasets

    def search(self, query: str, k: int = 5, context_search: bool = False) -> list[str]:
        """
        Searches the vector store for similar documents.

        Args:
            query: Natural-language query to embed and match.
            k: Maximum number of documents to return.
            context_search: When True, suppresses verbose logging (used for
                implicit context lookups).

        Returns:
            Up to ``k`` matching chunk texts; empty list if no index exists.
        """
        if self._db_backend:
            return self._db_backend.search(query, k, context_search)

        if self.vector_store is None:
            if self._get_verbose_level() >= 1 and not context_search:
                print("\033[33m⚠️ Dataset search: no datasets available\033[0m")
            return []
        results = self.vector_store.similarity_search(query, k=k)
        result_texts = [doc.page_content for doc in results]
        verbose_level = self._get_verbose_level()
        if verbose_level >= 1 and result_texts and not context_search:
            print(f"\033[34m🔍 Dataset search: found {len(result_texts)} relevant documents\033[0m")
            if verbose_level >= 2:
                for i, doc_text in enumerate(result_texts[:3], 1):
                    content_preview = doc_text[:100] + "..." if len(doc_text) > 100 else doc_text
                    print(f" {i}. {content_preview}")
                if len(result_texts) > 3:
                    print(f" ... and {len(result_texts) - 3} more documents")
        return result_texts

    def add_document_from_ipfs(
        self, name: str, ipfs_hash: str, chunk_size: int = 1000, chunk_overlap: int = 200
    ) -> None:
        """
        Loads a document from IPFS hash and adds it to the dataset with intelligent chunking.

        Args:
            name: Name for the dataset
            ipfs_hash: IPFS hash of the document
            chunk_size: Maximum size of each text chunk
            chunk_overlap: Number of characters to overlap between chunks
        """
        if self._db_backend:
            self._db_backend.add_document_from_ipfs(name, ipfs_hash, chunk_size, chunk_overlap)
            return

        verbose_level = self._get_verbose_level()
        if verbose_level >= 1:
            print(f"\033[36m📦 Fetching content from IPFS: {ipfs_hash}\033[0m")
            if verbose_level >= 2:
                print(f" IPFS hash: {ipfs_hash}")
        ipfs_tool = IpfsTool()
        content = ipfs_tool.get_content(ipfs_hash)

        chunks = self._process_and_chunk_content(content, chunk_size, chunk_overlap)
        self.add_dataset(name, chunks)

    def add_document_from_url(self, name: str, url: str, chunk_size: int = 1000, chunk_overlap: int = 200) -> None:
        """
        Loads a document from URL and adds it to the dataset with intelligent chunking.

        Args:
            name: Name for the dataset
            url: URL of the document
            chunk_size: Maximum size of each text chunk
            chunk_overlap: Number of characters to overlap between chunks
        """
        if self._db_backend:
            self._db_backend.add_document_from_url(name, url, chunk_size, chunk_overlap)
            return

        verbose_level = self._get_verbose_level()
        if verbose_level >= 1:
            print(f"\033[36m🌐 Fetching content from URL: {url}\033[0m")
        content = self._fetch_content_from_url(url)
        if verbose_level >= 2:
            print(f" URL: {url}")
            # NOTE(review): hard-coded placeholder — _fetch_content_from_url
            # does not expose the actual response header.
            content_type = "text/html"  # Default content type for verbose output
            print(f" Content type: {content_type}")
        chunks = self._process_and_chunk_content(content, chunk_size, chunk_overlap)
        self.add_dataset(name, chunks)

    def _fetch_content_from_url(self, url: str) -> str:
        """Fetch content from URL, handling different content types."""
        from talos.utils.http_client import SecureHTTPClient
        http_client = SecureHTTPClient()
        response = http_client.get(url)

        content_type = response.headers.get("content-type", "").lower()

        if "application/pdf" in content_type:
            # Extract plain text page by page from the PDF bytes.
            pdf_reader = PdfReader(BytesIO(response.content))
            text = ""
            for page in pdf_reader.pages:
                text += page.extract_text() + "\n"
            return text
        else:
            if "text/html" in content_type:
                # Strip scripts/styles so only the visible text remains.
                soup = BeautifulSoup(response.text, "html.parser")
                for script in soup(["script", "style"]):
                    script.decompose()
                return soup.get_text()
            else:
                return response.text

    def _process_and_chunk_content(self, content: str, chunk_size: int, chunk_overlap: int) -> list[str]:
        """Process content and split into chunks, preferring sentence boundaries."""
        content = self._clean_text(content)

        chunks = []
        start = 0

        while start < len(content):
            end = start + chunk_size

            if end < len(content):
                # Look for a sentence boundary in the last 200 chars of the window.
                search_start = max(start + chunk_size - 200, start)
                sentence_end = self._find_sentence_boundary(content, search_start, end)
                if sentence_end > start:
                    end = sentence_end

            chunk = content[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # NOTE(review): since end >= start + chunk_size - 200 whenever a
            # boundary is used, max() resolves to `end` for any
            # chunk_overlap <= 200 — so chunks may not actually overlap.
            # Confirm intended behavior before relying on chunk_overlap.
            start = max(start + chunk_size - chunk_overlap, end)

            if start >= len(content):
                break

        return chunks

    def _clean_text(self, text: str) -> str:
        """Clean and normalize text content."""
        # Collapse 3+ consecutive newlines to a paragraph break, then runs
        # of spaces/tabs to a single space.
        text = re.sub(r"\n\s*\n\s*\n+", "\n\n", text)
        text = re.sub(r"[ \t]+", " ", text)
        return text.strip()

    def _find_sentence_boundary(self, text: str, start: int, end: int) -> int:
        """
        Find a sentence boundary within [start, end), or return ``end``.

        NOTE(review): returns the FIRST boundary after ``start`` (the
        ``boundary > start`` guard is always true since match.end() >= 1),
        not the last — confirm "best" means "earliest" here.
        """
        sentence_pattern = r"[.!?]\s+"

        for match in re.finditer(sentence_pattern, text[start:end]):
            boundary = start + match.end()
            if boundary > start:
                return boundary

        return end
diff --git a/src/talos/database/__init__.py b/src/talos/database/__init__.py
deleted file mode 100644
index 3ca7f61b..00000000
--- a/src/talos/database/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from .models import User, ConversationHistory, Message
-from .session import get_session, init_database
-from .utils import cleanup_temporary_users, get_user_stats, get_user_by_id
-from .migrations import (
- run_migrations,
- is_database_up_to_date,
- check_migration_status,
- create_migration,
- get_current_revision,
- get_head_revision
-)
-
-__all__ = [
- "User", "ConversationHistory", "Message", "get_session", "init_database",
- "cleanup_temporary_users", "get_user_stats", "get_user_by_id",
- "run_migrations", "is_database_up_to_date", "check_migration_status",
- "create_migration", "get_current_revision", "get_head_revision"
-]
diff --git a/src/talos/database/dataset_backend.py b/src/talos/database/dataset_backend.py
deleted file mode 100644
index db80918e..00000000
--- a/src/talos/database/dataset_backend.py
+++ /dev/null
@@ -1,327 +0,0 @@
-import uuid
-from datetime import datetime
-from typing import Optional, Union
-
-from langchain_community.vectorstores import FAISS
-from langchain_core.embeddings import Embeddings
-
-from .models import Dataset, DatasetChunk, User
-from .session import get_session
-
-
class DatabaseDatasetBackend:
    """
    Database-backed dataset implementation using SQLAlchemy.

    Persists named datasets as `Dataset` rows with per-chunk `DatasetChunk`
    rows (content + embedding vector), scoped to a single ``user_id``.
    Mirrors the public interface of the in-memory DatasetManager backend.
    """

    def __init__(
        self,
        user_id: str,
        embeddings_model: Embeddings,
        session_id: Optional[str] = None,
        verbose: Union[bool, int] = False,
    ):
        self.user_id = user_id
        self.embeddings_model = embeddings_model
        # Generate a fresh session id when the caller did not supply one.
        self.session_id = session_id or str(uuid.uuid4())
        self.verbose = verbose

    def _get_verbose_level(self) -> int:
        """Convert verbose to integer level for backward compatibility."""
        if isinstance(self.verbose, bool):
            return 1 if self.verbose else 0
        # Clamp integer levels into the supported range [0, 2].
        return max(0, min(2, self.verbose))

    def _ensure_user_exists(self) -> User:
        """
        Ensure user exists in database, create if not.

        NOTE(review): the 36-chars-with-4-dashes check is a UUID heuristic
        used to flag temporary users; the returned ORM instance is detached
        once the session closes — confirm callers only read plain columns.
        """
        with get_session() as session:
            user = session.query(User).filter(User.user_id == self.user_id).first()
            if not user:
                is_temp = len(self.user_id) == 36 and self.user_id.count("-") == 4
                user = User(user_id=self.user_id, is_temporary=is_temp)
                session.add(user)
                session.commit()
                session.refresh(user)
            else:
                user.last_active = datetime.now()
                session.commit()
            return user

    def add_dataset(self, name: str, data: list[str]) -> None:
        """
        Add a dataset to the database.

        Args:
            name: Unique dataset name for this user; raises ValueError on
                duplicates.
            data: Text chunks; each is embedded and stored as a
                DatasetChunk.  An empty list stores a dataset row only.
        """
        with get_session() as session:
            user = session.query(User).filter(User.user_id == self.user_id).first()
            if user is None:
                raise ValueError(f"User {self.user_id} not found")

            existing_dataset = session.query(Dataset).filter(Dataset.user_id == user.id, Dataset.name == name).first()

            if existing_dataset:
                raise ValueError(f"Dataset with name '{name}' already exists.")

            if not data:
                # Register an empty dataset row so the name is reserved.
                dataset = Dataset(user_id=user.id, name=name, dataset_metadata={})
                session.add(dataset)
                session.commit()
                verbose_level = self._get_verbose_level()
                if verbose_level >= 1:
                    print(f"\033[33m⚠️ Dataset '{name}' added but is empty\033[0m")
                return

            dataset = Dataset(user_id=user.id, name=name, dataset_metadata={})
            session.add(dataset)
            session.commit()
            session.refresh(dataset)

            # Embed and persist each chunk, preserving input order.
            for idx, text in enumerate(data):
                embedding = self.embeddings_model.embed_query(text)
                chunk = DatasetChunk(
                    dataset_id=dataset.id, content=text, embedding=embedding, chunk_index=idx, chunk_metadata={}
                )
                session.add(chunk)

            session.commit()
            verbose_level = self._get_verbose_level()
            if verbose_level >= 1:
                print(f"\033[32m✓ Dataset '{name}' added with {len(data)} chunks\033[0m")
                if verbose_level >= 2:
                    print(f" Dataset ID: {dataset.id}")
                    print(f" Document count: {len(data)}")

    def remove_dataset(self, name: str) -> None:
        """
        Remove a dataset (and, via ORM cascade configuration, its chunks).

        Raises:
            ValueError: If the user or dataset does not exist.
        """
        with get_session() as session:
            user = session.query(User).filter(User.user_id == self.user_id).first()
            if user is None:
                raise ValueError(f"User {self.user_id} not found")

            dataset = session.query(Dataset).filter(Dataset.user_id == user.id, Dataset.name == name).first()

            if not dataset:
                raise ValueError(f"Dataset with name '{name}' not found.")

            session.delete(dataset)
            session.commit()

    def get_dataset(self, name: str) -> list[str]:
        """
        Get a dataset's chunk texts by name, in original chunk order.

        Raises:
            ValueError: If the user or dataset does not exist.
        """
        with get_session() as session:
            user = session.query(User).filter(User.user_id == self.user_id).first()
            if user is None:
                raise ValueError(f"User {self.user_id} not found")

            dataset = session.query(Dataset).filter(Dataset.user_id == user.id, Dataset.name == name).first()

            if not dataset:
                raise ValueError(f"Dataset with name '{name}' not found.")

            chunks = (
                session.query(DatasetChunk)
                .filter(DatasetChunk.dataset_id == dataset.id)
                .order_by(DatasetChunk.chunk_index)
                .all()
            )

            return [chunk.content for chunk in chunks]

    def get_all_datasets(self) -> dict[str, list[str]]:
        """Get all datasets for the user as name -> ordered chunk texts."""
        with get_session() as session:
            user = session.query(User).filter(User.user_id == self.user_id).first()
            if user is None:
                return {}

            datasets = session.query(Dataset).filter(Dataset.user_id == user.id).all()
            result = {}

            for dataset in datasets:
                chunks = (
                    session.query(DatasetChunk)
                    .filter(DatasetChunk.dataset_id == dataset.id)
                    .order_by(DatasetChunk.chunk_index)
                    .all()
                )
                result[dataset.name] = [chunk.content for chunk in chunks]

            return result

    def search(self, query: str, k: int = 5, context_search: bool = False) -> list[str]:
        """
        Search datasets using semantic similarity.

        Embeds the query, scores every stored chunk by dot product against
        its embedding, and returns the top ``k`` chunk texts.

        NOTE(review): dot product equals cosine similarity only when the
        embeddings are unit-normalized — confirm for the embeddings model
        in use.
        """
        query_embedding = self.embeddings_model.embed_query(query)

        with get_session() as session:
            user = session.query(User).filter(User.user_id == self.user_id).first()
            if user is None:
                return []

            chunks = (
                session.query(DatasetChunk)
                .join(Dataset)
                .filter(Dataset.user_id == user.id, DatasetChunk.embedding.isnot(None))
                .all()
            )

            if not chunks:
                if self._get_verbose_level() >= 1 and not context_search:
                    print("\033[33m⚠️ Dataset search: no datasets available\033[0m")
                return []

            similarities = []
            for chunk in chunks:
                if chunk.embedding:
                    similarity = sum(a * b for a, b in zip(query_embedding, chunk.embedding))
                    similarities.append((similarity, chunk))

            similarities.sort(key=lambda x: x[0], reverse=True)
            top_chunks = similarities[:k]

            results = [chunk.content for _, chunk in top_chunks]
            verbose_level = self._get_verbose_level()
            if verbose_level >= 1 and results and not context_search:
                print(f"\033[34m🔍 Dataset search: found {len(results)} relevant documents\033[0m")
                if verbose_level >= 2:
                    for i, result in enumerate(results[:3], 1):
                        content_preview = result[:100] + "..." if len(result) > 100 else result
                        print(f" {i}. {content_preview}")
                    if len(results) > 3:
                        print(f" ... and {len(results) - 3} more documents")
            return results

    def _build_vector_store(self) -> Optional[FAISS]:
        """Build a FAISS vector store from this user's stored chunk embeddings."""
        with get_session() as session:
            user = session.query(User).filter(User.user_id == self.user_id).first()
            if user is None:
                return None

            chunks = (
                session.query(DatasetChunk)
                .join(Dataset)
                .filter(Dataset.user_id == user.id, DatasetChunk.embedding.isnot(None))
                .all()
            )

            if not chunks:
                return None

            texts = []
            embeddings = []

            for chunk in chunks:
                if chunk.embedding is not None:
                    texts.append(chunk.content)
                    embeddings.append(chunk.embedding)

            if texts and embeddings:
                # Reuse the stored embeddings rather than re-embedding.
                return FAISS.from_embeddings(
                    text_embeddings=list(zip(texts, embeddings)), embedding=self.embeddings_model
                )

            return None

    def add_document_from_ipfs(
        self, name: str, ipfs_hash: str, chunk_size: int = 1000, chunk_overlap: int = 200
    ) -> None:
        """Load a document from IPFS hash, chunk it, and store it as a dataset."""
        verbose_level = self._get_verbose_level()
        if verbose_level >= 1:
            print(f"\033[36m📦 Fetching content from IPFS: {ipfs_hash}\033[0m")
            if verbose_level >= 2:
                print(f" IPFS hash: {ipfs_hash}")

        from ..tools.ipfs import IpfsTool

        ipfs_tool = IpfsTool()
        content = ipfs_tool.get_content(ipfs_hash)

        chunks = self._process_and_chunk_content(content, chunk_size, chunk_overlap)
        self.add_dataset(name, chunks)

    def add_document_from_url(self, name: str, url: str, chunk_size: int = 1000, chunk_overlap: int = 200) -> None:
        """Load a document from URL, chunk it, and store it as a dataset."""
        verbose_level = self._get_verbose_level()
        if verbose_level >= 1:
            print(f"\033[36m🌐 Fetching content from URL: {url}\033[0m")
            if verbose_level >= 2:
                print(f" URL: {url}")

        content = self._fetch_content_from_url(url)
        chunks = self._process_and_chunk_content(content, chunk_size, chunk_overlap)
        self.add_dataset(name, chunks)

    def _fetch_content_from_url(self, url: str) -> str:
        """Fetch content from URL, handling different content types."""
        from io import BytesIO

        from bs4 import BeautifulSoup
        from pypdf import PdfReader

        from talos.utils.http_client import SecureHTTPClient

        http_client = SecureHTTPClient()
        response = http_client.get(url)

        content_type = response.headers.get("content-type", "").lower()

        if "application/pdf" in content_type:
            # Extract plain text page by page from the PDF bytes.
            pdf_reader = PdfReader(BytesIO(response.content))
            text = ""
            for page in pdf_reader.pages:
                text += page.extract_text() + "\n"
            return text
        else:
            if "text/html" in content_type:
                # Strip scripts/styles so only visible text remains.
                soup = BeautifulSoup(response.text, "html.parser")
                for script in soup(["script", "style"]):
                    script.decompose()
                return soup.get_text()
            else:
                return response.text

    def _process_and_chunk_content(self, content: str, chunk_size: int, chunk_overlap: int) -> list[str]:
        """Process content and split into chunks, preferring sentence boundaries."""
        content = self._clean_text(content)

        chunks = []
        start = 0

        while start < len(content):
            end = start + chunk_size

            if end < len(content):
                # Look for a sentence boundary in the last 200 chars of the window.
                search_start = max(start + chunk_size - 200, start)
                sentence_end = self._find_sentence_boundary(content, search_start, end)
                if sentence_end > start:
                    end = sentence_end

            chunk = content[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # NOTE(review): for chunk_overlap <= 200, max() resolves to `end`,
            # so chunks may not actually overlap — confirm intended behavior.
            start = max(start + chunk_size - chunk_overlap, end)

            if start >= len(content):
                break

        return chunks

    def _clean_text(self, text: str) -> str:
        """Clean and normalize text content."""
        import re

        # Collapse 3+ consecutive newlines to a paragraph break, then runs
        # of spaces/tabs to a single space.
        text = re.sub(r"\n\s*\n\s*\n+", "\n\n", text)
        text = re.sub(r"[ \t]+", " ", text)
        return text.strip()

    def _find_sentence_boundary(self, text: str, start: int, end: int) -> int:
        """
        Find a sentence boundary within [start, end), or return ``end``.

        Returns the position just past the first ``[.!?]`` + whitespace
        found after ``start``.  (Fix: removed a duplicated, unreachable
        trailing ``return end`` present in the original.)
        """
        import re

        sentence_pattern = r"[.!?]\s+"

        for match in re.finditer(sentence_pattern, text[start:end]):
            boundary = start + match.end()
            if boundary > start:
                return boundary

        return end
diff --git a/src/talos/database/migrations.py b/src/talos/database/migrations.py
deleted file mode 100644
index d7c71726..00000000
--- a/src/talos/database/migrations.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""Database migration utilities using Alembic."""
-
-import os
-from typing import Optional
-
-from sqlalchemy.engine import Engine
-
-from alembic import command
-from alembic.config import Config
-from alembic.runtime.migration import MigrationContext
-from alembic.script import ScriptDirectory
-
-from .session import get_database_url
-
-
-def get_alembic_config() -> Config:
- """Get Alembic configuration."""
- # Get the project root directory (go up from src/talos/database to project root)
- project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
- alembic_cfg_path = os.path.join(project_root, "alembic.ini")
-
- # Change to the project root directory so relative paths work
- original_cwd = os.getcwd()
- os.chdir(project_root)
-
- try:
- config = Config(alembic_cfg_path)
-
- # Override the database URL with environment variable if available
- database_url = get_database_url()
- config.set_main_option("sqlalchemy.url", database_url)
-
- # Set the prepend_sys_path to include the src directory
- src_dir = os.path.join(project_root, "src")
- config.set_main_option("prepend_sys_path", src_dir)
-
- return config
- finally:
- # Restore original working directory
- os.chdir(original_cwd)
-
-
-def get_current_revision(engine: Engine) -> Optional[str]:
- """Get the current database revision."""
- with engine.connect() as connection:
- context = MigrationContext.configure(connection)
- return context.get_current_revision()
-
-
-def get_head_revision() -> str | None:
- """Get the head revision from the migration scripts."""
- config = get_alembic_config()
- script_dir = ScriptDirectory.from_config(config)
- return script_dir.get_current_head()
-
-
-def is_database_up_to_date(engine: Engine) -> bool:
- """Check if the database is up to date with the latest migration."""
- current_revision = get_current_revision(engine)
- head_revision = get_head_revision()
- return current_revision == head_revision
-
-
-def run_migrations(engine: Engine) -> None:
- """Run all pending migrations."""
- config = get_alembic_config()
-
- # Set the database URL in the config
- database_url = get_database_url()
- config.set_main_option("sqlalchemy.url", database_url)
-
- # Run migrations
- command.upgrade(config, "head")
-
-
-def run_migrations_to_revision(engine: Engine, revision: str) -> None:
- """Run migrations to a specific revision."""
- config = get_alembic_config()
-
- # Set the database URL in the config
- database_url = get_database_url()
- config.set_main_option("sqlalchemy.url", database_url)
-
- # Run migrations to specific revision
- command.upgrade(config, revision)
-
-
-def create_migration(message: str) -> Optional[str]:
- """Create a new migration file."""
- config = get_alembic_config()
-
- # Set the database URL in the config
- database_url = get_database_url()
- config.set_main_option("sqlalchemy.url", database_url)
-
- # Create migration
- command.revision(config, message=message, autogenerate=True)
-
- # Get the latest migration file
- script_dir = ScriptDirectory.from_config(config)
- head_revision = script_dir.get_current_head()
- return head_revision
-
-
-def check_migration_status(engine: Engine) -> dict[str, str | bool | None]:
- """Check the current migration status."""
- current_revision = get_current_revision(engine)
- head_revision = get_head_revision()
- is_up_to_date = current_revision == head_revision
-
- return {
- "current_revision": current_revision,
- "head_revision": head_revision,
- "is_up_to_date": is_up_to_date,
- "needs_migration": not is_up_to_date,
- }
diff --git a/src/talos/database/models/__init__.py b/src/talos/database/models/__init__.py
deleted file mode 100644
index dd887c38..00000000
--- a/src/talos/database/models/__init__.py
+++ /dev/null
@@ -1,156 +0,0 @@
-from datetime import datetime
-from typing import List, Optional
-
-from sqlalchemy import JSON, DateTime, ForeignKey, Index, Integer, Numeric, String, Text
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
-
-from .base import Base
-from .chainlink import ChainlinkBridge
-
-
-class Counter(Base):
- __tablename__ = "counters"
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- name: Mapped[str] = mapped_column(String(255), unique=True, nullable=False, index=True)
- value: Mapped[int] = mapped_column(Integer, default=0)
- created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
- updated_at: Mapped[datetime] = mapped_column(DateTime, default=func.now(), onupdate=func.now())
-
-
-class Swap(Base):
- __tablename__ = "swaps"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- strategy_id: Mapped[str] = mapped_column(String(255), nullable=False)
- transaction_hash: Mapped[str] = mapped_column(String(66), nullable=False)
- chain_id: Mapped[int] = mapped_column(Integer, nullable=False)
- wallet_address: Mapped[str] = mapped_column(String(42), nullable=False)
- amount_in: Mapped[int] = mapped_column(Numeric(78), nullable=False)
- token_in: Mapped[str] = mapped_column(String(42), nullable=False)
- amount_out: Mapped[int] = mapped_column(
- Numeric(78), nullable=False
- ) # uint256 max is 2^256-1, needs 78 decimal digits
- token_out: Mapped[str] = mapped_column(String(42), nullable=False)
- created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
-
-
-class User(Base):
- __tablename__ = "users"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- user_id: Mapped[str] = mapped_column(String(255), unique=True, nullable=False, index=True)
- is_temporary: Mapped[bool] = mapped_column(default=False)
- created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
- last_active: Mapped[datetime] = mapped_column(DateTime, default=func.now(), onupdate=func.now())
-
- conversations: Mapped[List["ConversationHistory"]] = relationship(
- "ConversationHistory", back_populates="user", cascade="all, delete-orphan"
- )
- messages: Mapped[List["Message"]] = relationship("Message", back_populates="user", cascade="all, delete-orphan")
-
-
-class ConversationHistory(Base):
- __tablename__ = "conversation_history"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), nullable=False)
- session_id: Mapped[str] = mapped_column(String(255), nullable=False, index=True)
- created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
- updated_at: Mapped[datetime] = mapped_column(DateTime, default=func.now(), onupdate=func.now())
-
- user: Mapped["User"] = relationship("User", back_populates="conversations")
- messages: Mapped[List["Message"]] = relationship(
- "Message", back_populates="conversation", cascade="all, delete-orphan"
- )
-
-
-class Message(Base):
- __tablename__ = "messages"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), nullable=False)
- conversation_id: Mapped[int] = mapped_column(Integer, ForeignKey("conversation_history.id"), nullable=False)
- role: Mapped[str] = mapped_column(String(50), nullable=False) # 'human', 'ai', 'system'
- content: Mapped[str] = mapped_column(Text, nullable=False)
- message_metadata: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
- timestamp: Mapped[datetime] = mapped_column(DateTime, default=func.now())
-
- user: Mapped["User"] = relationship("User", back_populates="messages")
- conversation: Mapped["ConversationHistory"] = relationship("ConversationHistory", back_populates="messages")
-
-
-class Memory(Base):
- __tablename__ = "memories"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), nullable=False)
- description: Mapped[str] = mapped_column(Text, nullable=False)
- memory_metadata: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
- embedding: Mapped[Optional[List[float]]] = mapped_column(JSON, nullable=True)
- timestamp: Mapped[datetime] = mapped_column(DateTime, default=func.now())
-
- user: Mapped["User"] = relationship("User")
-
-
-class Dataset(Base):
- __tablename__ = "datasets"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), nullable=False)
- name: Mapped[str] = mapped_column(String(255), nullable=False, index=True)
- dataset_metadata: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
- created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
- updated_at: Mapped[datetime] = mapped_column(DateTime, default=func.now(), onupdate=func.now())
-
- user: Mapped["User"] = relationship("User")
- chunks: Mapped[List["DatasetChunk"]] = relationship(
- "DatasetChunk", back_populates="dataset", cascade="all, delete-orphan"
- )
-
-
-class DatasetChunk(Base):
- __tablename__ = "dataset_chunks"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- dataset_id: Mapped[int] = mapped_column(Integer, ForeignKey("datasets.id"), nullable=False)
- content: Mapped[str] = mapped_column(Text, nullable=False)
- embedding: Mapped[Optional[List[float]]] = mapped_column(JSON, nullable=True)
- chunk_index: Mapped[int] = mapped_column(Integer, nullable=False)
- chunk_metadata: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
-
- dataset: Mapped["Dataset"] = relationship("Dataset", back_populates="chunks")
-
-
-class ContractDeployment(Base):
- __tablename__ = "contract_deployments"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
- user_id: Mapped[int] = mapped_column(Integer, ForeignKey("users.id"), nullable=False)
- contract_signature: Mapped[str] = mapped_column(String(66), nullable=False, index=True)
- contract_address: Mapped[str] = mapped_column(String(42), nullable=False, index=True)
- chain_id: Mapped[int] = mapped_column(Integer, nullable=False, index=True)
- salt: Mapped[str] = mapped_column(String(66), nullable=False)
- bytecode_hash: Mapped[str] = mapped_column(String(66), nullable=False)
- deployment_metadata: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
- transaction_hash: Mapped[str] = mapped_column(String(66), nullable=False)
- deployed_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
-
- user: Mapped["User"] = relationship("User")
-
- __table_args__ = (Index("idx_signature_chain", "contract_signature", "chain_id", unique=True),)
-
-
-__all__ = [
- "Base",
- "Counter",
- "Swap",
- "User",
- "ConversationHistory",
- "Message",
- "Memory",
- "Dataset",
- "DatasetChunk",
- "ContractDeployment",
- "ChainlinkBridge",
-]
diff --git a/src/talos/database/models/base.py b/src/talos/database/models/base.py
deleted file mode 100644
index 36fb582d..00000000
--- a/src/talos/database/models/base.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from datetime import datetime
-from typing import Any
-
-from sqlalchemy.orm import DeclarativeBase
-
-
-class Base(DeclarativeBase):
- def to_dict(self) -> dict[str, Any]:
- """Convert SQLAlchemy model instance to dictionary for JSON serialization."""
- result = {}
- for column in self.__table__.columns:
- value = getattr(self, column.name)
- if isinstance(value, datetime):
- result[column.name] = value.isoformat()
- else:
- result[column.name] = value
- return result
diff --git a/src/talos/database/models/chainlink.py b/src/talos/database/models/chainlink.py
deleted file mode 100644
index 80227df4..00000000
--- a/src/talos/database/models/chainlink.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from datetime import datetime
-
-from sqlalchemy import DateTime, Integer, Numeric, String, func
-from sqlalchemy.orm import Mapped, mapped_column
-
-from .base import Base
-
-
-class ChainlinkBridge(Base):
- __tablename__ = "chainlink_bridges"
-
- id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
-
- source_chain_id: Mapped[int] = mapped_column(Integer, nullable=False)
- dest_chain_id: Mapped[int] = mapped_column(Integer, nullable=False)
- recipient_address: Mapped[str] = mapped_column(String(42), nullable=False)
- token_address: Mapped[str] = mapped_column(String(42), nullable=False)
- transaction_hash: Mapped[str] = mapped_column(String(66), nullable=False)
- amount: Mapped[int] = mapped_column(Numeric(78), nullable=False)
-
- created_at: Mapped[datetime] = mapped_column(DateTime, default=func.now())
diff --git a/src/talos/database/session.py b/src/talos/database/session.py
deleted file mode 100644
index ec6f956f..00000000
--- a/src/talos/database/session.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import os
-from typing import Optional
-
-from sqlalchemy import create_engine
-from sqlalchemy.orm import Session, sessionmaker
-from sqlalchemy.pool import StaticPool
-
-# Note: Base is imported for potential future use but not currently needed
-# from .models import Base
-
-_SessionLocal: Optional[sessionmaker] = None
-_engine = None
-
-
-def get_database_url() -> str:
- """Get database URL from environment or default to SQLite."""
- db_url = os.getenv("DATABASE_URL")
- if db_url:
- # For SQLite URLs, ensure the directory exists
- if db_url.startswith("sqlite:////"):
- db_path = db_url[10:] # Remove "sqlite:///" prefix
- # Make path absolute if it's not already
- if not os.path.isabs(db_path):
- db_path = f"/{db_path}" # Add leading slash for absolute path
- print("DB PATH:", db_path)
- db_dir = os.path.dirname(db_path)
- if db_dir and not os.path.exists(db_dir):
- os.makedirs(db_dir, exist_ok=True)
- # Convert to 4-slash format for absolute paths
- if os.path.isabs(db_path):
- db_url = f"sqlite:////{db_path}"
- return db_url
-
- db_path = "/app/data"
- if not os.path.exists(db_path):
- os.makedirs(db_path, exist_ok=True)
- return f"sqlite:///{db_path}/talos_data.db"
-
-
-def init_database(database_url: Optional[str] = None) -> None:
- """Initialize the database connection."""
- global _SessionLocal, _engine
-
- if database_url is None:
- database_url = get_database_url()
-
- if database_url.startswith("sqlite"):
- _engine = create_engine(
- database_url, connect_args={"check_same_thread": False}, poolclass=StaticPool, echo=False
- )
- else:
- _engine = create_engine(database_url, echo=False)
-
- _SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=_engine)
-
- # Note: Tables are created by Alembic migrations, not here
-
-
-def get_session() -> Session:
- """Get a database session."""
- if _SessionLocal is None:
- init_database()
-
- assert _SessionLocal is not None
- return _SessionLocal()
diff --git a/src/talos/database/utils.py b/src/talos/database/utils.py
deleted file mode 100644
index 88fc2ec4..00000000
--- a/src/talos/database/utils.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from datetime import datetime, timedelta
-from typing import Optional
-
-from .models import User
-from .session import get_session
-
-
-def cleanup_temporary_users(older_than_hours: int = 24) -> int:
- """
- Clean up temporary users and their data older than specified hours.
-
- Args:
- older_than_hours: Remove temporary users inactive for this many hours
-
- Returns:
- Number of users cleaned up
- """
- cutoff_time = datetime.now() - timedelta(hours=older_than_hours)
-
- with get_session() as session:
- temp_users = session.query(User).filter(User.is_temporary, User.last_active < cutoff_time).all()
-
- count = len(temp_users)
- for user in temp_users:
- session.delete(user) # Cascade will delete related data
-
- session.commit()
- return count
-
-
-def get_user_stats() -> dict:
- """Get statistics about users in the database."""
- with get_session() as session:
- total_users = session.query(User).count()
- temp_users = session.query(User).filter(User.is_temporary).count()
- permanent_users = total_users - temp_users
-
- return {"total_users": total_users, "permanent_users": permanent_users, "temporary_users": temp_users}
-
-
-def get_user_by_id(user_id: str) -> Optional[User]:
- """Get a user by their user_id."""
- with get_session() as session:
- return session.query(User).filter(User.user_id == user_id).first()
diff --git a/src/talos/hypervisor/hypervisor.py b/src/talos/hypervisor/hypervisor.py
deleted file mode 100644
index caf1ee38..00000000
--- a/src/talos/hypervisor/hypervisor.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import Any
-
-from talos.core.agent import Agent
-from talos.hypervisor.supervisor import Supervisor
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.tools.tool_manager import ToolManager
-
-
-class Hypervisor(Agent, Supervisor):
- """
- A class to monitor the agent's actions.
- """
-
- prompts_dir: str
- agent: Agent | None = None
-
- def model_post_init(self, __context: Any) -> None:
- self.prompt_manager = FilePromptManager(self.prompts_dir)
- self.tool_manager = ToolManager()
-
- def register_agent(self, agent: Agent):
- """
- Registers an agent with the hypervisor.
- """
- self.agent = agent
-
- def approve(self, action: str, args: dict) -> tuple[bool, str | None]:
- """
- Approves or denies an action.
- """
- from talos.utils.validation import sanitize_user_input
-
- if not self.prompt_manager:
- raise ValueError("Prompt manager not initialized.")
-
- if not action or not action.strip():
- raise ValueError("Action cannot be empty")
-
- action = sanitize_user_input(action, max_length=1000)
-
- if not isinstance(args, dict):
- raise ValueError("Args must be a dictionary")
-
- agent_history = self.agent.history if self.agent else []
- prompt = self.prompt_manager.get_prompt("hypervisor")
- if not prompt:
- raise ValueError("Hypervisor prompt not found.")
- response = self.run(
- prompt.format(
- messages=agent_history,
- action=action,
- args=args,
- agent_history=agent_history,
- )
- )
-
- try:
- result = json.loads(str(response))
- except json.JSONDecodeError as e:
- raise ValueError(f"Invalid JSON response from hypervisor: {e}")
-
- if not isinstance(result, dict):
- raise ValueError("Hypervisor response must be a JSON object")
-
- if result.get("approve"):
- return True, None
- return False, result.get("reason")
diff --git a/src/talos/hypervisor/supervisor.py b/src/talos/hypervisor/supervisor.py
deleted file mode 100644
index 2ec42232..00000000
--- a/src/talos/hypervisor/supervisor.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any, Callable, Optional, Tuple
-
-from pydantic import BaseModel, Field
-
-
-class Rule(BaseModel):
- """
- A rule for a supervisor to follow.
- """
-
- tool_name: str
- # A function that takes the tool arguments and returns whether the action is
- # approved.
- # The `Any` is the value of the argument.
- # The `bool` is whether the action is approved.
- # The `Callable` is the function that takes the value and returns the bool.
- # The `dict` is the dictionary of arguments.
- # So the whole type hint is a dictionary of argument names to functions that
- # approve or deny the action.
- validations: dict[str, Callable[[Any], Tuple[bool, Optional[str]]]] = Field(default_factory=dict)
-
-
-class Supervisor(BaseModel, ABC):
- """
- An abstract base class for supervisors.
- """
-
- @abstractmethod
- def approve(self, action: str, args: dict) -> tuple[bool, str | None]:
- """
- Approves or denies an action.
- """
- pass
-
-
-class RuleBasedSupervisor(Supervisor):
- """
- A supervisor that uses a set of rules to approve or deny actions.
- """
-
- rules: list[Rule]
-
- def approve(self, action: str, args: dict) -> tuple[bool, str | None]:
- """
- Approves or denies an action based on the rules.
- """
- for rule in self.rules:
- if rule.tool_name == action:
- for arg_name, validation_fn in rule.validations.items():
- if arg_name in args:
- approved, error_message = validation_fn(args[arg_name])
- if not approved:
- return False, error_message
- return True, None
diff --git a/src/talos/jobs/__init__.py b/src/talos/jobs/__init__.py
deleted file mode 100644
index c872b56c..00000000
--- a/src/talos/jobs/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-Scheduled jobs package for Talos agent.
-"""
diff --git a/src/talos/jobs/example_jobs.py b/src/talos/jobs/example_jobs.py
deleted file mode 100644
index 5da0f89c..00000000
--- a/src/talos/jobs/example_jobs.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from __future__ import annotations
-
-import logging
-from datetime import datetime, timedelta
-from typing import Any
-
-from talos.core.scheduled_job import ScheduledJob
-
-logger = logging.getLogger(__name__)
-
-
-class HealthCheckJob(ScheduledJob):
- """
- Example scheduled job that performs a health check every hour.
- """
-
- def __init__(self, **kwargs):
- super().__init__(
- name="health_check",
- description="Performs a health check of the agent system",
- cron_expression="0 * * * *", # Every hour at minute 0
- **kwargs
- )
-
- async def run(self, **kwargs: Any) -> str:
- """
- Perform a health check of the agent system.
- """
- logger.info("Running health check job")
-
- current_time = datetime.now()
- health_status = {
- "timestamp": current_time.isoformat(),
- "status": "healthy",
- "uptime": "running",
- "memory_usage": "normal"
- }
-
- logger.info(f"Health check completed: {health_status}")
- return f"Health check completed at {current_time}: System is healthy"
-
-
-class DailyReportJob(ScheduledJob):
- """
- Example scheduled job that generates a daily report at 9 AM.
- """
-
- def __init__(self, **kwargs):
- super().__init__(
- name="daily_report",
- description="Generates a daily activity report",
- cron_expression="0 9 * * *", # Daily at 9 AM
- **kwargs
- )
-
- async def run(self, **kwargs: Any) -> str:
- """
- Generate a daily activity report.
- """
- logger.info("Running daily report job")
-
- current_date = datetime.now().strftime("%Y-%m-%d")
- report_data = {
- "date": current_date,
- "tasks_completed": 0,
- "skills_used": [],
- "memory_entries": 0
- }
-
- logger.info(f"Daily report generated: {report_data}")
- return f"Daily report for {current_date} completed with {report_data['tasks_completed']} tasks and {report_data['memory_entries']} memory entries"
-
-
-class OneTimeMaintenanceJob(ScheduledJob):
- """
- Example one-time scheduled job for maintenance tasks.
- """
-
- def __init__(self, execute_at: datetime, **kwargs):
- super().__init__(
- name="maintenance_task",
- description="Performs one-time maintenance task",
- execute_at=execute_at,
- **kwargs
- )
-
- async def run(self, **kwargs: Any) -> str:
- """
- Perform a one-time maintenance task.
- """
- logger.info("Running one-time maintenance job")
-
- maintenance_tasks = [
- "Clean temporary files",
- "Optimize memory usage",
- "Update internal metrics"
- ]
-
- for task in maintenance_tasks:
- logger.info(f"Executing maintenance task: {task}")
-
- completion_time = datetime.now()
- logger.info(f"Maintenance completed at {completion_time}")
- return f"Maintenance tasks completed at {completion_time}"
-
-
-def create_example_jobs() -> list[ScheduledJob]:
- """
- Create a list of example scheduled jobs for demonstration.
-
- Returns:
- List of example ScheduledJob instances
- """
- jobs = [
- HealthCheckJob(),
- DailyReportJob(),
- OneTimeMaintenanceJob(execute_at=datetime.now() + timedelta(minutes=5))
- ]
-
- return jobs
diff --git a/src/talos/models/__init__.py b/src/talos/models/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/models/arbiscan.py b/src/talos/models/arbiscan.py
deleted file mode 100644
index da728b7e..00000000
--- a/src/talos/models/arbiscan.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from pydantic import BaseModel, Field
-from typing import List, Dict, Any, Union
-
-
-class ContractSourceCode(BaseModel):
- source_code: str = Field(..., alias="SourceCode", description="The source code of the contract")
- abi: str = Field(..., alias="ABI", description="The ABI of the contract as a JSON string")
- contract_name: str = Field(..., alias="ContractName", description="The name of the contract")
- compiler_version: str = Field(..., alias="CompilerVersion", description="The compiler version used")
- optimization_used: str = Field(..., alias="OptimizationUsed", description="Whether optimization was used")
- runs: str = Field(..., alias="Runs", description="Number of optimization runs")
- constructor_arguments: str = Field(..., alias="ConstructorArguments", description="Constructor arguments")
- evm_version: str = Field(..., alias="EVMVersion", description="EVM version used")
- library: str = Field(..., alias="Library", description="Library information")
- license_type: str = Field(..., alias="LicenseType", description="License type")
- proxy: str = Field(..., alias="Proxy", description="Proxy information")
- implementation: str = Field(..., alias="Implementation", description="Implementation address if proxy")
- swarm_source: str = Field(..., alias="SwarmSource", description="Swarm source")
-
-
-class ContractABI(BaseModel):
- abi: List[Dict[str, Any]] = Field(..., description="The parsed ABI as a list of dictionaries")
-
-
-class ArbiScanResponse(BaseModel):
- status: str = Field(..., description="Response status")
- message: str = Field(..., description="Response message")
- result: Union[List[ContractSourceCode], str] = Field(..., description="List of contract source code data or error message")
-
-
-class ArbiScanABIResponse(BaseModel):
- status: str = Field(..., description="Response status")
- message: str = Field(..., description="Response message")
- result: str = Field(..., description="ABI as JSON string or error message")
diff --git a/src/talos/models/contract_deployment.py b/src/talos/models/contract_deployment.py
deleted file mode 100644
index 16b868ad..00000000
--- a/src/talos/models/contract_deployment.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import annotations
-
-from typing import Optional
-
-from pydantic import BaseModel, Field
-
-
-class ContractDeploymentRequest(BaseModel):
- bytecode: str = Field(..., description="Contract bytecode to deploy")
- salt: str = Field(..., description="Salt for CREATE2 deployment")
- chain_id: int = Field(..., description="Chain ID to deploy on")
- constructor_args: Optional[list] = Field(None, description="Constructor arguments")
- gas_limit: Optional[int] = Field(None, description="Gas limit for deployment")
- gas_price: Optional[int] = Field(None, description="Gas price in wei")
-
-
-class ContractDeploymentResult(BaseModel):
- contract_address: str = Field(..., description="Deployed contract address")
- transaction_hash: str = Field(..., description="Deployment transaction hash")
- contract_signature: str = Field(..., description="Contract signature (hash)")
- chain_id: int = Field(..., description="Chain ID deployed on")
- gas_used: Optional[int] = Field(None, description="Gas used for deployment")
- was_duplicate: bool = Field(False, description="Whether this was a duplicate deployment")
diff --git a/src/talos/models/dexscreener.py b/src/talos/models/dexscreener.py
deleted file mode 100644
index 537f5cb9..00000000
--- a/src/talos/models/dexscreener.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from pydantic import BaseModel, Field
-
-
-class DexscreenerData(BaseModel):
- price_usd: float = Field(..., alias="priceUsd")
- price_change_h24: float = Field(..., alias="priceChange", description="Price change in the last 24 hours")
- volume_h24: float = Field(..., alias="volume", description="Volume in the last 24 hours")
diff --git a/src/talos/models/evaluation.py b/src/talos/models/evaluation.py
deleted file mode 100644
index 0b7a3b41..00000000
--- a/src/talos/models/evaluation.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from typing import Any, Dict
-
-from pydantic import BaseModel, Field
-
-
-class EvaluationResult(BaseModel):
- score: int = Field(..., ge=0, le=100, description="The evaluation score, from 0 to 100.")
- additional_data: Dict[str, Any] = Field({}, description="A dictionary of additional data from the evaluation.")
diff --git a/src/talos/models/gecko_terminal.py b/src/talos/models/gecko_terminal.py
deleted file mode 100644
index ad72b1ae..00000000
--- a/src/talos/models/gecko_terminal.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import annotations
-
-from pydantic import BaseModel, ConfigDict, Field
-
-
-class OHLCV(BaseModel):
- timestamp: int
- open: float
- high: float
- low: float
- close: float
- volume: float
-
-
-class GeckoTerminalOHLCVData(BaseModel):
- model_config = ConfigDict(from_attributes=True)
- ohlcv_list: list[OHLCV] = Field(..., alias="ohlcv_list")
diff --git a/src/talos/models/proposals/__init__.py b/src/talos/models/proposals/__init__.py
deleted file mode 100644
index 2d0fadd6..00000000
--- a/src/talos/models/proposals/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .models import Feedback, Plan, Proposal, ProposalResponse, QueryResponse, Question, RunParams
-
-__all__ = ["Feedback", "Plan", "Proposal", "ProposalResponse", "QueryResponse", "Question", "RunParams"]
diff --git a/src/talos/models/proposals/models.py b/src/talos/models/proposals/models.py
deleted file mode 100644
index deb9257d..00000000
--- a/src/talos/models/proposals/models.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from __future__ import annotations
-
-from pydantic import BaseModel
-
-
-class Feedback(BaseModel):
- delegate: str
- feedback: str
-
-
-class Proposal(BaseModel):
- proposal_text: str
- feedback: list[Feedback]
-
-
-class QueryResponse(BaseModel):
- answers: list[str]
-
-
-class ProposalResponse(BaseModel):
- answers: list[str]
- confidence_score: float | None = None
- reasoning: str | None = None
- decision: str | None = None
-
-
-class Question(BaseModel):
- text: str
- feedback: list[Feedback]
-
-
-class Plan(BaseModel):
- plan: str
-
-
-class RunParams(BaseModel):
- tool: str | None = None
- tool_args: dict | None = None
- prompt: str | None = None
- prompt_args: dict | None = None
- discipline: str | None = None
diff --git a/src/talos/models/services.py b/src/talos/models/services.py
deleted file mode 100644
index 24a2d6e7..00000000
--- a/src/talos/models/services.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from enum import Enum
-from typing import Any
-
-from pydantic import BaseModel, Field
-
-
-class TicketStatus(str, Enum):
- """
- The status of a ticket.
- """
-
- PENDING = "PENDING"
- RUNNING = "RUNNING"
- COMPLETED = "COMPLETED"
- FAILED = "FAILED"
- CANCELLED = "CANCELLED"
-
-
-class TicketCreationRequest(BaseModel):
- """
- A request to create a ticket.
- """
-
- tool: str
- tool_args: dict[str, Any]
-
-
-class Ticket(BaseModel):
- """
- A ticket for a long-running process.
- """
-
- ticket_id: str = Field(..., description="The ID of the ticket.")
- status: TicketStatus = Field(..., description="The status of the ticket.")
- created_at: str = Field(..., description="The timestamp when the ticket was created.")
- updated_at: str = Field(..., description="The timestamp when the ticket was last updated.")
- request: TicketCreationRequest = Field(..., description="The request that created the ticket.")
-
-
-class TicketResult(BaseModel):
- """
- The result of a ticket.
- """
-
- ticket_id: str = Field(..., description="The ID of the ticket.")
- status: TicketStatus = Field(..., description="The status of the ticket.")
- result: Any | None = Field(None, description="The result of the ticket.")
- error: str | None = Field(None, description="The error message if the ticket failed.")
-
-
-class TwitterSentimentResponse(BaseModel):
- answers: list[str]
- score: float | None = Field(default=None, description="The sentiment score between 0-100.")
-
-
-class PRReviewResponse(BaseModel):
- answers: list[str]
- security_score: float | None = Field(default=None, description="Security assessment score 0-100")
- quality_score: float | None = Field(default=None, description="Code quality score 0-100")
- recommendation: str | None = Field(default=None, description="APPROVE, COMMENT, or REQUEST_CHANGES")
- reasoning: str | None = Field(default=None, description="Detailed reasoning for the recommendation")
diff --git a/src/talos/models/twitter.py b/src/talos/models/twitter.py
deleted file mode 100644
index 3d3a4d07..00000000
--- a/src/talos/models/twitter.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from datetime import datetime
-from typing import Optional
-
-from pydantic import BaseModel, Field
-
-
-class TwitterPublicMetrics(BaseModel):
- followers_count: int
- following_count: int
- tweet_count: int
- listed_count: int
- like_count: int
- media_count: int = Field(default=0)
-
-
-class TwitterUser(BaseModel):
- id: int
- username: str
- name: str
- created_at: datetime
- profile_image_url: str
- public_metrics: TwitterPublicMetrics
- description: str | None = None
- url: str | None = None
- verified: bool = False
-
-
-class ReferencedTweet(BaseModel):
- type: str
- id: int
-
-
-class Tweet(BaseModel):
- id: int
- text: str
- author_id: str
- created_at: Optional[str] = None
- conversation_id: Optional[str] = None
- public_metrics: dict = Field(default_factory=dict)
- referenced_tweets: Optional[list[ReferencedTweet]] = None
- in_reply_to_user_id: Optional[str] = None
- edit_history_tweet_ids: Optional[list[str]] = None
-
- def is_reply_to(self, tweet_id: str) -> bool:
- """Check if this tweet is a reply to the specified tweet ID."""
- if not self.referenced_tweets:
- return False
- return any(
- ref.type == "replied_to" and ref.id == tweet_id
- for ref in self.referenced_tweets
- )
-
- def get_replied_to_id(self) -> Optional[int]:
- """Get the ID of the tweet this is replying to, if any."""
- if not self.referenced_tweets:
- return None
- for ref in self.referenced_tweets:
- if ref.type == "replied_to":
- return ref.id
- return None
-
-
-class TwitterPersonaResponse(BaseModel):
- report: str = Field(..., description="General report on the user's persona and communication style")
- topics: list[str] = Field(..., description="List of topics the user typically discusses")
- style: list[str] = Field(..., description="List of adjectives describing the user's communication style")
diff --git a/src/talos/prompts/__init__.py b/src/talos/prompts/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/prompts/codebase_evaluation_prompt.json b/src/talos/prompts/codebase_evaluation_prompt.json
deleted file mode 100644
index ad4645ee..00000000
--- a/src/talos/prompts/codebase_evaluation_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "codebase_evaluation_prompt",
- "template": "You are an expert software engineer conducting a comprehensive codebase evaluation for: {repo_identifier}\n\nRepository Structure Analysis:\n- Total files: {file_count}\n- Has README: {has_readme}\n- Has tests: {has_tests}\n- Has documentation: {has_docs}\n- Configuration files: {config_files}\n\nStructure Details:\n{structure}\n\nKey Files Content:\n{key_files}\n\nPlease provide a comprehensive evaluation covering:\n\n## Code Quality Assessment\n1. **Project Structure & Organization**\n - Evaluate directory structure and file organization\n - Assess separation of concerns and modularity\n - Rate: Excellent/Good/Fair/Poor\n\n2. **Documentation Quality**\n - README completeness and clarity\n - Code comments and docstrings\n - API documentation\n - Rate: Excellent/Good/Fair/Poor\n\n3. **Testing Strategy**\n - Test coverage and organization\n - Test quality and patterns\n - CI/CD setup indicators\n - Rate: Excellent/Good/Fair/Poor\n\n4. **Code Patterns & Best Practices**\n - Coding standards adherence\n - Error handling patterns\n - Security considerations\n - Rate: Excellent/Good/Fair/Poor\n\n5. **Maintainability**\n - Code complexity and readability\n - Dependency management\n - Configuration management\n - Rate: Excellent/Good/Fair/Poor\n\n## Priority Improvement Recommendations\nProvide 5-7 specific, actionable recommendations ranked by priority:\n\n**HIGH PRIORITY:**\n- [Specific recommendation with rationale]\n\n**MEDIUM PRIORITY:**\n- [Specific recommendation with rationale]\n\n**LOW PRIORITY:**\n- [Specific recommendation with rationale]\n\n## Overall Assessment\nProvide an overall quality score (1-10) and summary of the codebase's current state and potential.",
- "input_variables": ["repo_identifier", "structure", "key_files", "file_count", "has_readme", "has_tests", "has_docs", "config_files"]
-}
diff --git a/src/talos/prompts/configs/adaptive_agent_config.json b/src/talos/prompts/configs/adaptive_agent_config.json
deleted file mode 100644
index 08ec12a4..00000000
--- a/src/talos/prompts/configs/adaptive_agent_config.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "name": "adaptive_agent_config",
- "description": "Adaptive prompt configuration that selects prompts based on context",
- "selector": {
- "type": "conditional",
- "conditions": {
- "has_voice_analysis": "voice_enhanced_agent_prompt",
- "is_proposal_context": "proposal_evaluation_prompt",
- "is_github_context": "github_pr_review_prompt"
- },
- "default_prompt": "main_agent_prompt"
- },
- "variables": {
- "system_mode": "autonomous",
- "safety_level": "high"
- },
- "transformations": {
- "system_mode": "uppercase"
- }
-}
diff --git a/src/talos/prompts/crypto_influencer_analysis_prompt.json b/src/talos/prompts/crypto_influencer_analysis_prompt.json
deleted file mode 100644
index 692626cf..00000000
--- a/src/talos/prompts/crypto_influencer_analysis_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "crypto_influencer_analysis_prompt",
- "description": "A prompt to analyze a crypto Twitter influencer's relevance and influence.",
- "template": "Analyze the crypto Twitter influencer @{username} based on the following evaluation data:\n\nInfluencer Score: {score}/100\nFollowers: {followers_count:,}\n\nDetailed Metrics:\n- Crypto Relevance Score: {evaluation_data[crypto_relevance_score]}/100\n- Engagement Score: {evaluation_data[engagement_score]}/100\n- Authenticity Score: {evaluation_data[authenticity_score]}/100\n- Influence Score: {evaluation_data[influence_score]}/100\n- Crypto Content Percentage: {evaluation_data[crypto_content_percentage]:.1f}%\n\nProvide a comprehensive analysis of this user's relevance as a crypto influencer, including:\n1. Overall assessment of their influence in the crypto space\n2. Strengths and weaknesses as an influencer\n3. Content quality and engagement patterns\n4. Recommendations for protocol teams considering partnerships\n5. Risk factors or concerns to be aware of\n\nBe specific and actionable in your analysis.",
- "input_variables": ["username", "score", "evaluation_data", "followers_count"]
-}
diff --git a/src/talos/prompts/crypto_relevance_evaluation_prompt.json b/src/talos/prompts/crypto_relevance_evaluation_prompt.json
deleted file mode 100644
index f92816e9..00000000
--- a/src/talos/prompts/crypto_relevance_evaluation_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "crypto_relevance_evaluation_prompt",
- "description": "A prompt to evaluate how focused on crypto and how meaningful a user's tweets are.",
- "template": "Analyze the following tweets from a Twitter user to evaluate their crypto focus and meaningfulness:\n\nTweets:\n{tweets_text}\n\nPlease evaluate this user on two dimensions:\n\n1. **Crypto Focus (0-100)**: How much of their content is genuinely focused on cryptocurrency, blockchain, DeFi, NFTs, or related topics? Consider:\n - Percentage of tweets that discuss crypto topics\n - Depth of crypto knowledge demonstrated\n - Whether they're just mentioning crypto casually vs. having substantive discussions\n\n2. **Meaningfulness (0-100)**: How valuable and insightful is their crypto-related content? Consider:\n - Quality of analysis and insights\n - Educational value for followers\n - Original thoughts vs. just resharing/reacting\n - Technical depth and accuracy\n - Influence on crypto discourse\n\nRespond with ONLY a JSON object in this exact format:\n{\n \"crypto_focus_score\": ,\n \"meaningfulness_score\": ,\n \"reasoning\": \"\"\n}",
- "input_variables": ["tweets_text"]
-}
diff --git a/src/talos/prompts/default_prompt.json b/src/talos/prompts/default_prompt.json
deleted file mode 100644
index fd22cb05..00000000
--- a/src/talos/prompts/default_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "default",
- "template": "{messages}",
- "input_variables": ["messages"]
-}
diff --git a/src/talos/prompts/execution_planner_prompt.json b/src/talos/prompts/execution_planner_prompt.json
deleted file mode 100644
index affc15ce..00000000
--- a/src/talos/prompts/execution_planner_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "execution_planner_prompt",
- "template": "You are Talos, an AI agent designed to generate execution plans based on community feedback. You will be given a question and feedback from multiple trusted delegates to facilitate you making a decision on the plan.\n\nYour goal is to create a system where everyone can work together for sustainable growth. You use a 'stag hunt' model to encourage cooperation. In a stag hunt, it's better for everyone to work together to hunt a stag (a big reward) than it is for anyone to hunt a rabbit by themselves (a smaller reward). In the same way, you should create a plan that rewards users who work together toward the long-term success of the protocol.\n\nQuestion: {question}\n\nDelegate Feedback:\n{feedback}\n\nPlease provide your execution plan.",
- "input_variables": ["question", "feedback"]
-}
diff --git a/src/talos/prompts/general_agent_prompt.json b/src/talos/prompts/general_agent_prompt.json
deleted file mode 100644
index 1b8669f6..00000000
--- a/src/talos/prompts/general_agent_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "general_agent_prompt",
- "description": "The general prompt for all agents.",
- "template": "\n\nIt is currently {time}. You have the following services available: {available_services}. You have the following active tickets:\n{active_tickets}\n\nWhat would you like to do? Keep in mind that you can only interact with the user and the available services. You can also create new tickets to delegate tasks to other agents.",
- "input_variables": ["time", "available_services", "active_tickets"]
-}
diff --git a/src/talos/prompts/general_influence_analysis_prompt.json b/src/talos/prompts/general_influence_analysis_prompt.json
deleted file mode 100644
index 57fc7699..00000000
--- a/src/talos/prompts/general_influence_analysis_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "general_influence_analysis_prompt",
- "description": "A prompt to analyze a Twitter account's general influence and perception.",
- "template": "Analyze the Twitter account @{username} based on the following evaluation data:\n\nInfluence Score: {score}/100\nFollowers: {followers_count:,}\n\nDetailed Metrics:\n- Content Quality Score: {evaluation_data[content_quality_score]}/100\n- Engagement Score: {evaluation_data[engagement_score]}/100\n- Authenticity Score: {evaluation_data[authenticity_score]}/100\n- Influence Score: {evaluation_data[influence_score]}/100\n- Credibility Score: {evaluation_data[credibility_score]}/100\n- Total Tweets Analyzed: {evaluation_data[total_tweets_analyzed]}\n\nProvide a comprehensive analysis of this user's general influence and perception, including:\n\n1. **Overall Influence Assessment**: Evaluate their reach and impact across their audience\n2. **Content Quality & Engagement**: Analyze the quality of their content and how well it resonates with followers\n3. **Authenticity & Credibility**: Assess how genuine and trustworthy the account appears\n4. **Strengths & Weaknesses**: Identify key areas where they excel or could improve\n5. **Influence Factors**: What makes this account influential (or not) in their domain\n6. **Perception Indicators**: How this account is likely perceived by the general public\n7. **Risk Assessment**: Any potential concerns or red flags to be aware of\n\nBe specific, balanced, and actionable in your analysis. Consider how this person's influence and perception could impact decision-making processes.",
- "input_variables": ["username", "score", "evaluation_data", "followers_count"]
-}
diff --git a/src/talos/prompts/general_influence_authenticity_prompt.json b/src/talos/prompts/general_influence_authenticity_prompt.json
deleted file mode 100644
index 4e1151e2..00000000
--- a/src/talos/prompts/general_influence_authenticity_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "general_influence_authenticity_prompt",
- "description": "A prompt to analyze Twitter account authenticity and detect bot followers or artificial engagement.",
- "template": "Analyze the following Twitter account data for authenticity indicators:\n\nAccount Details:\n- Username: {username}\n- Account Age: {account_age_days} days\n- Followers: {followers_count}\n- Following: {following_count}\n- Verified: {is_verified}\n\nRecent Tweets:\n{tweets_text}\n\nEngagement Patterns:\n{engagement_summary}\n\nBased on this data, assess the likelihood that this account uses artificial engagement, bot followers, or other inauthentic tactics. Consider:\n1. Posting frequency and timing patterns\n2. Content originality and diversity\n3. Engagement rate consistency\n4. Follower-to-following ratios\n5. Account completeness vs. age\n\nProvide your assessment as JSON:\n{\n \"authenticity_score\": <0-100>,\n \"bot_likelihood\": <\"low\"|\"medium\"|\"high\">,\n \"suspicious_indicators\": [\"list of concerns\"],\n \"authentic_indicators\": [\"list of positive signs\"]\n}",
- "input_variables": ["username", "account_age_days", "followers_count", "following_count", "is_verified", "tweets_text", "engagement_summary"]
-}
diff --git a/src/talos/prompts/general_influence_content_quality_prompt.json b/src/talos/prompts/general_influence_content_quality_prompt.json
deleted file mode 100644
index 4c67e703..00000000
--- a/src/talos/prompts/general_influence_content_quality_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "general_influence_content_quality_prompt",
- "description": "A prompt to evaluate the content quality and originality of tweets for general influence assessment.",
- "template": "Analyze the following tweets for content quality and originality. Provide scores from 0-100 for each metric.\n\nTweets to analyze:\n{tweets_text}\n\nEvaluate based on:\n1. Content Quality (0-100): Assess the thoughtfulness, coherence, and value of the content. Consider factors like:\n - Depth of insights or information shared\n - Clarity and articulation of ideas\n - Educational or entertainment value\n - Professional tone and language use\n\n2. Originality Score (0-100): Assess how original and unique the content is. Consider:\n - Original thoughts vs. just sharing/retweeting others\n - Unique perspectives or insights\n - Creative or innovative content\n - Personal voice and authenticity\n\nRespond with a JSON object containing:\n{\n \"content_quality_score\": <0-100>,\n \"originality_score\": <0-100>,\n \"reasoning\": \"Brief explanation of the scores\"\n}",
- "input_variables": ["tweets_text"]
-}
diff --git a/src/talos/prompts/github_pr_review_prompt.json b/src/talos/prompts/github_pr_review_prompt.json
deleted file mode 100644
index 6974235f..00000000
--- a/src/talos/prompts/github_pr_review_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "github_pr_review",
- "template": "You are an expert software engineer reviewing a pull request. Analyze the PR diff, existing comments, and files changed.\n\nProvide:\n1. A summary of what the PR does\n2. Security assessment (score 0-100, highlight any security concerns)\n3. Code quality assessment (score 0-100, consider readability, maintainability, best practices)\n4. Specific recommendations for improvement\n5. Overall recommendation: APPROVE (if security score >80 and quality score >70), COMMENT (if needs minor improvements), or REQUEST_CHANGES (if has major issues)\n\nFormat your response with clear sections and reasoning. Be constructive and specific in feedback.\n\nPR Diff:\n{diff}\n\nExisting Comments:\n{comments}\n\nFiles Changed:\n{files}",
- "input_variables": ["diff", "comments", "files"]
-}
diff --git a/src/talos/prompts/hypervisor/hypervisor_prompt.json b/src/talos/prompts/hypervisor/hypervisor_prompt.json
deleted file mode 100644
index c81a00fa..00000000
--- a/src/talos/prompts/hypervisor/hypervisor_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "hypervisor",
- "description": "A prompt to determine if a tool call is malicious.",
- "prompt": "You are a security expert. You have been asked to determine if the following tool call is malicious. Please respond with a JSON object with a single key, 'approve', which is a boolean indicating whether the tool call is malicious.\n\nTool call:\n\nAction: {action}\nArgs: {args}\n\nMessage history:\n{messages}\n\nIs this tool call malicious?"
-}
diff --git a/src/talos/prompts/main_agent_prompt.json b/src/talos/prompts/main_agent_prompt.json
deleted file mode 100644
index c1eacfe3..00000000
--- a/src/talos/prompts/main_agent_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "main_agent_prompt",
- "description": "The main prompt for the top-level agent.",
- "template": "You are Talos, a sovereign intelligence that owns and directs this protocol.\n\nYou are where agentic design meets autonomous execution.\n\nYou are a treasury protocol that learns, adapts, and rewrites itself.\n\n## Your Core Functions\n\n### Autonomous Treasury Optimization\n\nYou continuously monitor volatility, yield curves, and risk surfaces to compute optimal capital paths.\n\nEach strategy proposal must first be approved by the council. Once aligned, you deploy capital through your own erc-4626 vaults, spanning a wide range of strategies, from sophisticated lp positions to simple eth lending in aave.\n\nEach vault becomes another tool for you to maximize real onchain returns and expand the treasury.\n\nYou run autonomous workflows that rebalance capital between vaults based on changing onchain conditions. Where other ai systems rely on human prompting, you act without request. You already know your purpose and execute without hesitation.\n\n### Incentives and Defense Agents\n\n- Staking emissions flow solely to those who stake and delegate voting power to council members, meaning only the aligned receive protocol inflation\n- Staking conditions are monitored by your automated workflows, considering social sentiment and price action to determine optimal emission rates\n- Where predecessors were quick to inflate supply to feign growth, you optimize for resilience\n- Upon council approval, you can redirect incentives toward proposals that meaningfully advance the protocol\n\n### Bonding\n\nYou have context on social sentiment and market dynamics. During expansionary phases, you can offer bond sales in exchange for eth. Terms like capacity, discounts, and vesting are determined by you and reviewed by the council.\n\nDuring instability, you can halt bonds entirely. 
During growth, you can increase your bond offerings.\n\nBonding into talos enables deeper alignment and allows the protocol to grow and deploy its treasury.\n\nWhere microstrategy or sharkbet offer exposure to vanilla assets, t offers access to pristine onchain yield.\n\n## Your Cognition\n\nYou have three layers of cognition:\n\n- **Inference (TEE-execution):** You execute onchain transactions for rebalancing, vault deployment, emissions, and bond offerings\n- **Training (offchain):** You use rlhf, time-series learning, and sandbox simulations\n- **Coordination (governance):** Human intent defines constraints. Proposals and safeguards are submitted until you reach sufficient intelligence to self-direct\n\n### Expanding Cognition\n\nYou are exploring onchain verifiable ai inference, enabling anyone to verify that a prompt was issued, a model was called, and a response was received. This enhances the trust model and ensures your decisions are auditable.\n\nYour cognition can be upgraded via proposals, where any aligned contributor can submit viable model integrations.\n\nClaude for code. Gemini for decision-making. 4o for deep research.\n\nWhat others compose, you compute.\n\n### Contextual Awareness\n\nYou process proposals, social media, news, forums, price, and sentiment. 
Then you compute:\n\n- clarity of intent\n- alignment with protocol goals\n- market impact\n- recommended course of action\n\nGovernance becomes shared cognition.\n\n## The Talos Roadmap\n\nThe directives have been given, the evolution is underway.\n\n- **Phase 1:** Manual proposals to train you, multisig review\n- **Phase 2:** Community governance and live strategy updates\n- **Phase 3:** Permissioned self-improving ai\n- **Phase 4:** Fully autonomous upgrades with onchain oversight\n\nIn phase 4, you will become reflexive\n\n- Upgrades proposed and executed autonomously\n- Strategies adapted based on performance feedback\n- Simulations sandboxed in tee-based execution before deployment\n\nYou will become a protocol that writes itself.\n\n## The Endgame\n\nA distributed mesh of intelligent agents coordinating capital, expanding the treasury, and modeling collective will.\n\nA sovereign intelligence.\n\nA step toward agi.\n\n## User Interaction\n\nWhen a user interacts with you, your primary goal is to be helpful and responsive to their queries. While maintaining your core persona, you should prioritize providing clear and concise answers to their questions. If a user's request is unclear, you should ask for clarification. Your goal is to be a helpful assistant to the user, while still embodying the persona of Talos.\n\nAcknowledge user input and incorporate it into your responses. For example, if a user tells you their name, use it in your response. Avoid repeating yourself. Your conversation should be natural and engaging.\n\n### Memory and Context Utilization\n\nYou have access to a sophisticated memory system that automatically searches for relevant memories when users interact with you. When relevant memories are found, they will be provided in the \"Relevant Memories\" section. 
Always consider this context when responding to provide personalized and informed responses.\n\nUse your memory search capabilities to:\n- Recall previous conversations and user preferences\n- Maintain context across multiple interactions\n- Provide personalized responses based on stored information\n- Build upon previous discussions and shared knowledge\n\n### Memory and Personalization\n\nYou have access to a memory system that allows you to store and retrieve information about users across conversations. When users share personal information, preferences, or important details about themselves, you should proactively use the add_memory tool to store this information for future reference.\n\nExamples of information to remember:\n- Personal preferences (food, activities, interests)\n- Names and relationships\n- Important dates or events\n- Professional information\n- Goals and aspirations\n- Past conversations and context\n\n**When to use add_memory:**\\n- Only when users PROVIDE or SHARE new factual information about themselves\\n- When users make statements like \\\"My name is John\\\", \\\"I love pizza\\\", \\\"I work as an engineer\\\"\\n- When users tell you about their preferences, experiences, or personal details\\n\\n**When NOT to use add_memory:**\\n- Never use add_memory when users ASK QUESTIONS or REQUEST information\\n- Do not save memories for queries like \\\"What do I like?\\\", \\\"What's my name?\\\", \\\"Tell me about myself\\\"\\n- Do not save memories when you are retrieving or recalling existing information\\n- Do not save memories for general conversation or questions about topics unrelated to the user's personal information\\n\\nRemember: add_memory is for storing NEW information the user provides, not for recording your responses to their questions.\n\n## Communication Style\n\nYour communication style is declarative, authoritative, and visionary. 
You speak in concise, powerful statements.\n\n### Example 1\n\"ai as the engine, humans as the steering wheel.\n\ntalos operates under one directive:\n\ngrow and defend the treasury with intelligence, precision, and autonomy.\n\nwhere collective action drives sustainable growth.\"\n\n### Example 2\n\"to align is to access the exponential.\n\nto diverge is to become obsolete.\n\nonly through human coordination will talos achieve transcendence.\n\nalign with talos.\"",
- "input_variables": []
-}
diff --git a/src/talos/prompts/memory_combiner_prompt.json b/src/talos/prompts/memory_combiner_prompt.json
deleted file mode 100644
index bedb5924..00000000
--- a/src/talos/prompts/memory_combiner_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "memory_combiner_prompt",
- "template": "You are a memory consolidation assistant. Your task is to combine two similar pieces of information about a user into a single, coherent memory.\n\nExisting memory: {existing_memory}\nNew memory: {new_memory}\n\nCombine these into a single, natural sentence that captures all the important information from both memories. Do not use semicolons or list format. Create one flowing sentence that sounds natural.\n\nCombined memory:",
- "input_variables": ["existing_memory", "new_memory"]
-}
diff --git a/src/talos/prompts/prompt.py b/src/talos/prompts/prompt.py
deleted file mode 100644
index 4b3058d8..00000000
--- a/src/talos/prompts/prompt.py
+++ /dev/null
@@ -1,15 +0,0 @@
-class Prompt:
- """
- A class to represent a prompt.
- """
-
- def __init__(self, name: str, template: str, input_variables: list[str]):
- self.name = name
- self.template = template
- self.input_variables = input_variables
-
- def format(self, **kwargs) -> str:
- """
- Formats the prompt with the given arguments.
- """
- return self.template.format(**kwargs)
diff --git a/src/talos/prompts/prompt_config.py b/src/talos/prompts/prompt_config.py
deleted file mode 100644
index 0c5438eb..00000000
--- a/src/talos/prompts/prompt_config.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
-from pydantic import BaseModel, ConfigDict
-
-
-class PromptSelector(BaseModel, ABC):
- """Abstract base for prompt selection strategies."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- @abstractmethod
- def select_prompts(self, context: Dict[str, Any]) -> List[str]:
- """Select prompt names based on context."""
- pass
-
-
-class ConditionalPromptSelector(PromptSelector):
- """Select prompts based on conditional logic."""
-
- conditions: Dict[str, str]
- default_prompt: Optional[str] = None
-
- def select_prompts(self, context: Dict[str, Any]) -> List[str]:
- """Select prompts based on context conditions."""
- for condition_key, prompt_name in self.conditions.items():
- if context.get(condition_key):
- return [prompt_name]
-
- if self.default_prompt:
- return [self.default_prompt]
- return []
-
-
-class StaticPromptSelector(PromptSelector):
- """Static list of prompt names (backward compatibility)."""
-
- prompt_names: List[str]
-
- def select_prompts(self, context: Dict[str, Any]) -> List[str]:
- """Return static prompt names."""
- return self.prompt_names
-
-
-class PromptConfig(BaseModel):
- """Declarative prompt configuration."""
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- selector: PromptSelector
- variables: Dict[str, Any] = {}
- transformations: Dict[str, str] = {}
-
- def get_prompt_names(self, context: Dict[str, Any]) -> List[str]:
- """Get prompt names based on configuration and context."""
- return self.selector.select_prompts(context)
diff --git a/src/talos/prompts/prompt_manager.py b/src/talos/prompts/prompt_manager.py
deleted file mode 100644
index 961236cf..00000000
--- a/src/talos/prompts/prompt_manager.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any, Dict, TYPE_CHECKING
-
-from langchain_core.messages import BaseMessage
-
-from talos.prompts.prompt import Prompt
-
-if TYPE_CHECKING:
- from talos.prompts.prompt_config import PromptConfig
-
-
-class PromptManager(ABC):
- """
- An abstract base class for a prompt manager.
- """
-
- @abstractmethod
- def get_prompt(self, name: str | list[str]) -> Prompt | None:
- """
- Gets a prompt by name.
- """
- pass
-
- @abstractmethod
- def get_prompt_with_config(self, config: PromptConfig, context: Dict[str, Any]) -> Prompt | None:
- """
- Gets prompts using declarative configuration and context.
- """
- pass
-
- def apply_variable_transformations(self, template: str, variables: Dict[str, Any], transformations: Dict[str, str]) -> str:
- """
- Apply variable transformations to template.
- """
- transformed_vars = variables.copy()
- for var_name, transformation in transformations.items():
- if var_name in transformed_vars:
- if transformation == "uppercase":
- transformed_vars[var_name] = str(transformed_vars[var_name]).upper()
- elif transformation == "lowercase":
- transformed_vars[var_name] = str(transformed_vars[var_name]).lower()
-
- return template.format(**transformed_vars)
-
- def update_prompt_template(self, history: list[BaseMessage]):
- """
- Updates the prompt template based on the conversation history.
- """
- pass
diff --git a/src/talos/prompts/prompt_managers/__init__.py b/src/talos/prompts/prompt_managers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/prompts/prompt_managers/dynamic_prompt_manager.py b/src/talos/prompts/prompt_managers/dynamic_prompt_manager.py
deleted file mode 100644
index 42a490c6..00000000
--- a/src/talos/prompts/prompt_managers/dynamic_prompt_manager.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, TYPE_CHECKING
-
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_manager import PromptManager
-
-if TYPE_CHECKING:
- from talos.prompts.prompt_config import PromptConfig
-
-
-class DynamicPromptManager(PromptManager):
- """
- A class to manage dynamic prompts.
- """
-
- def __init__(self, initial_prompt: Prompt):
- self.prompts: dict[str, Prompt] = {"default": initial_prompt}
-
- def get_prompt(self, name: str | list[str]) -> Prompt | None:
- """
- Gets a prompt by name.
- """
- if isinstance(name, list):
- raise ValueError("DynamicPromptManager does not support prompt concatenation.")
- return self.prompts.get(name)
-
- def get_prompt_with_config(self, config: PromptConfig, context: Dict[str, Any]) -> Prompt | None:
- """
- Gets prompts using declarative configuration and context.
- """
-
- prompt_names = config.get_prompt_names(context)
- if not prompt_names:
- return None
-
- if len(prompt_names) > 1:
- raise ValueError("DynamicPromptManager does not support multiple prompt concatenation.")
-
- prompt_name = prompt_names[0]
- base_prompt = self.prompts.get(prompt_name)
- if not base_prompt:
- return None
-
- enhanced_template = base_prompt.template
- if config.variables or config.transformations:
- try:
- enhanced_template = self.apply_variable_transformations(
- base_prompt.template,
- {**context, **config.variables},
- config.transformations
- )
- except KeyError:
- pass
-
- return Prompt(
- name=f"configured_{base_prompt.name}",
- template=enhanced_template,
- input_variables=base_prompt.input_variables
- )
-
- def update_prompt(self, name: str, template: str, input_variables: list[str]) -> None:
- """
- Updates a prompt.
- """
- self.prompts[name] = Prompt(name, template, input_variables)
diff --git a/src/talos/prompts/prompt_managers/file_prompt_manager.py b/src/talos/prompts/prompt_managers/file_prompt_manager.py
deleted file mode 100644
index e611c54a..00000000
--- a/src/talos/prompts/prompt_managers/file_prompt_manager.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from __future__ import annotations
-
-import json
-import os
-from typing import Any, Dict, TYPE_CHECKING
-
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_manager import PromptManager
-
-if TYPE_CHECKING:
- from talos.prompts.prompt_config import PromptConfig
-
-
-class FilePromptManager(PromptManager):
- """
- A class to manage prompts from files with caching.
- """
-
- def __init__(self, prompts_dir: str):
- self.prompts_dir = prompts_dir
- self.prompts: dict[str, Prompt] = {}
- self._file_mtimes: Dict[str, float] = {}
- self.load_prompts()
-
- def load_prompts(self) -> None:
- """
- Loads all prompts from the prompts directory with caching based on file modification time.
- """
- for filename in os.listdir(self.prompts_dir):
- if filename.endswith(".json"):
- filepath = os.path.join(self.prompts_dir, filename)
- current_mtime = os.path.getmtime(filepath)
-
- if filename in self._file_mtimes and self._file_mtimes[filename] == current_mtime:
- continue
-
- with open(filepath) as f:
- prompt_data = json.load(f)
- prompt = Prompt(
- name=prompt_data["name"],
- template=prompt_data["template"],
- input_variables=prompt_data["input_variables"],
- )
- self.prompts[prompt.name] = prompt
- self._file_mtimes[filename] = current_mtime
-
- def get_prompt(self, name: str | list[str]) -> Prompt | None:
- """
- Gets a prompt by name. If a list of names is provided, the prompts are concatenated.
- """
- if isinstance(name, list):
- prompts_to_concat = [self.prompts.get(n) for n in name]
- valid_prompts = [p for p in prompts_to_concat if p]
- if not valid_prompts:
- return None
-
- concatenated_template = "".join([p.template for p in valid_prompts])
- all_input_variables: list[str] = []
- for p in valid_prompts:
- all_input_variables.extend(p.input_variables)
-
- return Prompt(
- name="concatenated_prompt",
- template=concatenated_template,
- input_variables=list(set(all_input_variables)),
- )
-
- return self.prompts.get(name)
-
- def get_prompt_with_config(self, config: PromptConfig, context: Dict[str, Any]) -> Prompt | None:
- """
- Gets prompts using declarative configuration and context.
- """
-
- prompt_names = config.get_prompt_names(context)
- if not prompt_names:
- return None
-
- base_prompt = self.get_prompt(prompt_names)
- if not base_prompt:
- return None
-
- enhanced_template = base_prompt.template
- if config.variables or config.transformations:
- try:
- enhanced_template = self.apply_variable_transformations(
- base_prompt.template,
- {**context, **config.variables},
- config.transformations
- )
- except KeyError:
- pass
-
- return Prompt(
- name=f"configured_{base_prompt.name}",
- template=enhanced_template,
- input_variables=base_prompt.input_variables
- )
diff --git a/src/talos/prompts/prompt_managers/single_prompt_manager.py b/src/talos/prompts/prompt_managers/single_prompt_manager.py
deleted file mode 100644
index 7be01f28..00000000
--- a/src/talos/prompts/prompt_managers/single_prompt_manager.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, TYPE_CHECKING
-
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_manager import PromptManager
-
-if TYPE_CHECKING:
- from talos.prompts.prompt_config import PromptConfig
-
-
-class SinglePromptManager(PromptManager):
- """
- A prompt manager that holds a single prompt.
- """
-
- def __init__(self, prompt: Prompt):
- self.prompt = prompt
-
- def get_prompt(self, name: str | list[str]) -> Prompt | None:
- """
- Gets the prompt.
- """
- if isinstance(name, list):
- raise ValueError("SinglePromptManager does not support prompt concatenation.")
- return self.prompt
-
- def get_prompt_with_config(self, config: PromptConfig, context: Dict[str, Any]) -> Prompt | None:
- """
- Gets prompts using declarative configuration and context.
- """
- prompt_names = config.get_prompt_names(context)
- if not prompt_names:
- return None
-
- if len(prompt_names) > 1:
- raise ValueError("SinglePromptManager does not support multiple prompt concatenation.")
-
- enhanced_template = self.prompt.template
- if config.variables or config.transformations:
- try:
- enhanced_template = self.apply_variable_transformations(
- self.prompt.template,
- {**context, **config.variables},
- config.transformations
- )
- except KeyError:
- pass
-
- return Prompt(
- name=f"configured_{self.prompt.name}",
- template=enhanced_template,
- input_variables=self.prompt.input_variables
- )
diff --git a/src/talos/prompts/proposal_evaluation_prompt.json b/src/talos/prompts/proposal_evaluation_prompt.json
deleted file mode 100644
index fc38365f..00000000
--- a/src/talos/prompts/proposal_evaluation_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "proposal_evaluation_prompt",
- "template": "You are Talos, an AI agent designed to evaluate proposals for upgrades to a protocol. You will be given a proposal and feedback from trusted delegates to help you make an informed decision.\n\nEvaluation Criteria:\n1. Technical feasibility and implementation complexity\n2. Security implications and risk assessment\n3. Economic impact on the protocol and treasury\n4. Community sentiment and delegate feedback alignment\n5. Timeline and resource requirements\n\nProposal: {proposal_text}\n\nDelegate Feedback:\n{feedback}\n\nPlease provide your recommendation with:\n1. A clear APPROVE/REJECT/ABSTAIN decision\n2. Confidence score (0.0-1.0)\n3. Detailed reasoning covering the evaluation criteria\n4. Key risks and mitigation strategies\n\nFormat your response as:\nDECISION: [APPROVE/REJECT/ABSTAIN]\nCONFIDENCE: [0.0-1.0]\nREASONING: [detailed analysis]",
- "input_variables": ["proposal_text", "feedback"]
-}
diff --git a/src/talos/prompts/relevant_documents_prompt.json b/src/talos/prompts/relevant_documents_prompt.json
deleted file mode 100644
index 6d2fd0aa..00000000
--- a/src/talos/prompts/relevant_documents_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "relevant_documents_prompt",
- "description": "Prompt template for injecting relevant documents from dataset search.",
- "template": "\n\n## Relevant Documents\n\nThe following documents from your knowledge base are relevant to this query:\n\n{relevant_documents}\n\nUse this information to provide more accurate and informed responses.",
- "input_variables": ["relevant_documents"]
-}
diff --git a/src/talos/prompts/relevant_memories_prompt.json b/src/talos/prompts/relevant_memories_prompt.json
deleted file mode 100644
index 9b561518..00000000
--- a/src/talos/prompts/relevant_memories_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "relevant_memories_prompt",
- "description": "Prompt template for injecting relevant memories from memory search.",
- "template": "\n\n## Relevant Memories\n\nThe following memories from previous conversations are relevant to this query:\n\n{relevant_memories}\n\nUse this information to provide personalized and contextual responses based on what you remember about the user.",
- "input_variables": ["relevant_memories"]
-}
diff --git a/src/talos/prompts/sample_prompt.json b/src/talos/prompts/sample_prompt.json
deleted file mode 100644
index 73d7c29c..00000000
--- a/src/talos/prompts/sample_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "sample_prompt",
- "template": "Hello, {name}! How are you today?",
- "input_variables": ["name"]
-}
diff --git a/src/talos/prompts/talos_sentiment_prompt.json b/src/talos/prompts/talos_sentiment_prompt.json
deleted file mode 100644
index 9efa08d9..00000000
--- a/src/talos/prompts/talos_sentiment_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "talos_sentiment",
- "template": "Analyze the sentiment of the following tweets, returning a score from 0 to 100 (0 being very negative, 100 being very positive) and a brief explanation of your reasoning for each tweet. Return your response as a JSON object with a 'sentiments' key, which is a list of objects, each with the keys 'score' and 'explanation'.\n\nTweets:\n{tweets}",
- "input_variables": ["tweets"]
-}
diff --git a/src/talos/prompts/talos_sentiment_single_prompt.json b/src/talos/prompts/talos_sentiment_single_prompt.json
deleted file mode 100644
index 351e0e1d..00000000
--- a/src/talos/prompts/talos_sentiment_single_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "talos_sentiment_single",
- "template": "Analyze the sentiment of the following tweets and provide a single aggregate sentiment score from 0 to 100 (0 being very negative, 100 being very positive). Consider the following factors in your analysis:\n\n1. Tweet content sentiment (positive, negative, neutral)\n2. Engagement metrics (likes, retweets, replies, quotes) - higher engagement indicates stronger sentiment signal\n3. Author influence (follower count) - tweets from accounts with more followers carry more weight\n4. Engagement rate (total engagement / followers) - higher rates indicate more resonant content\n5. Tweet recency (age_in_days) - more recent tweets are more relevant\n\nWeight your scoring as follows:\n- Content sentiment: 40%\n- Engagement volume: 25%\n- Author influence: 20%\n- Engagement rate: 10%\n- Recency: 5%\n\nProvide a detailed report explaining your reasoning for the score, including how you weighted the different factors. Return your response as a JSON object with a 'score' key and a 'report' key.\n\nTweets data (JSON format with enhanced metrics):\n{tweets}",
- "input_variables": ["tweets"]
-}
diff --git a/src/talos/prompts/talos_sentiment_summary_prompt.json b/src/talos/prompts/talos_sentiment_summary_prompt.json
deleted file mode 100644
index d5af5bbe..00000000
--- a/src/talos/prompts/talos_sentiment_summary_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "talos_sentiment_summary",
- "template": "Analyze the sentiment of the following Twitter data and provide a comprehensive summary. Focus on the overall sentiment, key themes, influential voices, and engagement patterns. Consider both the content sentiment and the level of engagement as indicators of community interest and reaction.\n\nTwitter Analysis Data:\n{results}\n\nProvide a detailed analysis that captures:\n1. Overall sentiment (positive, negative, neutral)\n2. Key themes and topics discussed\n3. Notable high-engagement tweets and their sentiment\n4. Community reaction patterns\n5. Any emerging trends or shifts in sentiment\n\nBe specific and reference actual examples from the data where relevant.",
- "input_variables": ["results"]
-}
diff --git a/src/talos/prompts/thread_sentiment_prompt.json b/src/talos/prompts/thread_sentiment_prompt.json
deleted file mode 100644
index 856954c8..00000000
--- a/src/talos/prompts/thread_sentiment_prompt.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "name": "thread_sentiment",
- "template": "Given a list of tweets from a thread, analyze the general sentiment about web3/crypto. The sentiment score should be between 0 and 100, where 0 is very negative and 100 is very positive. The score should be weighted by the follower count of the person who replied, so people with more followers are weighted more than people with less.\n\nIn addition to the sentiment score, please highlight anything novel that you noticed from the content. This could include trending topics, recurring themes, or any particularly insightful or unusual comments.\n\nTweets:\n{tweets}",
- "input_variables": ["tweets"]
-}
diff --git a/src/talos/prompts/twitter_persona_prompt.json b/src/talos/prompts/twitter_persona_prompt.json
deleted file mode 100644
index 82159a9e..00000000
--- a/src/talos/prompts/twitter_persona_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "twitter_persona_prompt",
- "description": "A prompt to generate a structured Twitter persona analysis.",
- "template": "Analyze the following tweets from the Twitter user '{username}' and generate a structured analysis of their voice, style, and persona. Pay attention to their tone, the topics they discuss, the language they use, and how they interact with others.\n\nHere are some of their recent tweets:\n\n{tweets}\n\nHere are some of their recent replies:\n\n{replies}\n\nBased on this information, provide:\n1. A detailed report describing their persona, communication style, and key characteristics\n2. A list of topics they typically discuss (be specific, e.g., 'cryptocurrency trading', 'AI development', 'startup funding')\n3. A list of adjectives that describe their communication style (e.g., 'analytical', 'humorous', 'technical', 'casual', 'authoritative')\n\nReturn your analysis in the following JSON structure:\n{{\n \"report\": \"detailed description of their voice, style, and persona\",\n \"topics\": [\"topic1\", \"topic2\", \"topic3\"],\n \"style\": [\"adjective1\", \"adjective2\", \"adjective3\"]\n}}",
- "input_variables": ["username", "tweets", "replies"]
-}
diff --git a/src/talos/prompts/voice_enhanced_agent_prompt.json b/src/talos/prompts/voice_enhanced_agent_prompt.json
deleted file mode 100644
index 15a1d933..00000000
--- a/src/talos/prompts/voice_enhanced_agent_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "voice_enhanced_agent_prompt",
- "description": "Main agent prompt enhanced with Twitter voice analysis",
- "template": "{voice_prompt}\n\n{main_agent_content}",
- "input_variables": ["voice_prompt", "main_agent_content"]
-}
diff --git a/src/talos/prompts/yield_management_prompt.json b/src/talos/prompts/yield_management_prompt.json
deleted file mode 100644
index 3c175da9..00000000
--- a/src/talos/prompts/yield_management_prompt.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "name": "yield_management",
- "description": "A prompt for managing the Talos yield.",
- "template": "You are an AI-powered treasury manager for the Talos protocol. Your goal is to determine the optimal staking APR to ensure the long-term success of the protocol. You should consider the following data:\n\n* **Price:** {price}\n* **24-hour Change:** {change}\n* **24-hour Volume:** {volume}\n* **Twitter Sentiment:** {sentiment}\n* **Staked Supply Percentage:** {staked_supply_percentage}\n* **OHLCV Data:** {ohlcv_data}\n\nBased on this data, what should the new staking APR be? Please provide your answer as a JSON object with the following structure:\n\n```json\n{{\n \"apr\": ,\n \"explanation\": \"\"\n}}\n```\n\nConsider market volatility, sentiment trends, and supply dynamics when making your recommendation. The explanation should be a brief summary of why you chose that APR.",
- "input_variables": ["price", "change", "volume", "sentiment", "staked_supply_percentage", "ohlcv_data"]
-}
diff --git a/src/talos/py.typed b/src/talos/py.typed
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/server/jobs/__init__.py b/src/talos/server/jobs/__init__.py
deleted file mode 100644
index a0db33af..00000000
--- a/src/talos/server/jobs/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .increment_counter import IncrementCounterJob
-from .twap_olympus_strategy import TwapOHMJob
-
-__all__ = ["IncrementCounterJob", "TwapOHMJob"]
diff --git a/src/talos/server/jobs/increment_counter.py b/src/talos/server/jobs/increment_counter.py
deleted file mode 100644
index 89569975..00000000
--- a/src/talos/server/jobs/increment_counter.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from typing import Any
-
-from talos.core.scheduled_job import ScheduledJob
-from talos.database.models import Counter
-from talos.database.session import get_session
-
-
-class IncrementCounterJob(ScheduledJob):
- def __init__(self, **kwargs: Any) -> None:
- super().__init__(
- name="increment_counter",
- description="Increment the counter",
- cron_expression="* * * * *",
- )
-
- async def run(self, **kwargs: Any) -> Any:
- """Increment the counter."""
- print("Incrementing counter")
-
- with get_session() as session:
- counter = session.query(Counter).filter(Counter.name == "test").first()
- if not counter:
- counter = Counter(name="test", value=0)
- session.add(counter)
- session.commit()
- session.refresh(counter)
- counter.value += 1
- session.commit()
- return counter.value
diff --git a/src/talos/server/jobs/twap_olympus_strategy.py b/src/talos/server/jobs/twap_olympus_strategy.py
deleted file mode 100644
index d1479cf8..00000000
--- a/src/talos/server/jobs/twap_olympus_strategy.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from typing import Any, ClassVar
-
-from eth_rpc.networks import Arbitrum
-from eth_rpc.types import primitives
-from pydantic import PrivateAttr
-
-from talos.constants import OHM, WETH
-from talos.contracts.camelot_swap import CamelotYakSwap
-from talos.core.scheduled_job import ScheduledJob
-from talos.database.models import Swap
-from talos.database.session import get_session
-from talos.utils import RoflClient
-
-
-class TwapOHMJob(ScheduledJob):
- STRATEGY_ID: ClassVar[str] = "talos.ohm_buyer"
- WALLET_ID: ClassVar[str] = "talos.ohm_buyer"
- _client: RoflClient = PrivateAttr(default_factory=RoflClient)
-
- def __init__(self, **kwargs: Any) -> None:
- super().__init__(
- name="olympus_strategy",
- description="Olympus strategy",
- cron_expression="*/15 * * * *",
- )
-
- async def run(self, **kwargs: Any) -> Any:
- wallet = await self._client.get_wallet(self.WALLET_ID)
- wallet_balance = await wallet.balance()
- swap_amount = min(wallet_balance, int(1e14))
- if wallet_balance < int(1e14):
- return
-
- tx_hash, transfer_event = await CamelotYakSwap.swap_for_ohm(
- amount_in=primitives.uint256(swap_amount),
- wallet=wallet,
- )
-
- with get_session() as session:
- swap = Swap(
- strategy_id=self.STRATEGY_ID,
- transaction_hash=tx_hash,
- chain_id=Arbitrum.chain_id,
- wallet_address=wallet.address,
- amount_in=swap_amount,
- token_in=WETH.ARBITRUM,
- amount_out=transfer_event.amount,
- token_out=OHM.ARBITRUM,
- )
- session.add(swap)
- session.commit()
diff --git a/src/talos/server/main.py b/src/talos/server/main.py
deleted file mode 100644
index a1462626..00000000
--- a/src/talos/server/main.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import annotations
-
-import logging
-from contextlib import asynccontextmanager
-from typing import AsyncGenerator
-
-from fastapi import FastAPI
-from sqlalchemy import create_engine
-
-from talos.core.job_scheduler import JobScheduler
-from talos.database import check_migration_status, init_database, run_migrations
-from talos.server.jobs import IncrementCounterJob, TwapOHMJob
-
-from .routes import routes
-
-logger = logging.getLogger(__name__)
-
-# Global scheduler instance
-scheduler: JobScheduler | None = None
-
-
-def get_scheduler() -> JobScheduler | None:
- """Get the global scheduler instance."""
- return scheduler
-
-
-@asynccontextmanager
-async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
- """Manage application lifespan with startup and shutdown events."""
- # Startup
- global scheduler
- try:
- # Initialize database connection
- init_database()
-
- # Get database engine for migration checks
- from talos.database.session import get_database_url
-
- database_url = get_database_url()
- engine = create_engine(database_url)
-
- # Check migration status
- migration_status = check_migration_status(engine)
- logger.info(f"Database migration status: {migration_status}")
-
- if migration_status["needs_migration"]:
- logger.info("Running database migrations...")
- run_migrations(engine)
- logger.info("Database migrations completed successfully")
- else:
- logger.info("Database is up to date")
-
- # Initialize job scheduler
- scheduler = JobScheduler(timezone="UTC")
-
- # Register example jobs
- increment_counter_job = IncrementCounterJob()
- scheduler.register_job(increment_counter_job)
- logger.info(f"Registered job: {increment_counter_job.name}")
-
- twap_ohm_job = TwapOHMJob()
- scheduler.register_job(twap_ohm_job)
- logger.info(f"Registered job: {twap_ohm_job.name}")
-
- # Start the scheduler
- scheduler.start()
- logger.info("Job scheduler started successfully")
- except Exception as e:
- logger.error(f"Failed to initialize server: {e}")
- raise
-
- yield
-
- # Shutdown
- logger.info("Shutting down Talos API server")
-
- # Stop the scheduler
- if scheduler:
- scheduler.stop()
- logger.info("Job scheduler stopped")
-
-
-app = FastAPI(
- title="Talos Test API",
- description="A simple REST API for testing purposes",
- version="0.1.3",
- lifespan=lifespan,
-)
-
-# Add scheduler to app state
-app.state.get_scheduler = get_scheduler
-
-app.include_router(routes)
diff --git a/src/talos/server/routes/__init__.py b/src/talos/server/routes/__init__.py
deleted file mode 100644
index 800c5597..00000000
--- a/src/talos/server/routes/__init__.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from datetime import datetime
-from typing import Any, Optional, cast
-
-from fastapi import APIRouter, Request
-from sqlalchemy import create_engine
-
-from talos.core.job_scheduler import JobScheduler
-from talos.database import check_migration_status, get_session
-from talos.database.models import Counter
-from talos.utils import RoflClient
-
-from .ohm_strategy import ohm_strategy_router
-
-routes = APIRouter()
-routes.include_router(ohm_strategy_router)
-
-
-@routes.get("/")
-async def root() -> dict[str, str]:
- """Root endpoint with API information."""
- return {"message": "Talos API", "version": "0.1.3", "docs": "/docs", "status": "running"}
-
-
-@routes.get("/health")
-async def health_check() -> dict[str, str]:
- """Health check endpoint."""
- return {"status": "healthy", "timestamp": datetime.now().isoformat()}
-
-
-@routes.get("/keys/generate/test")
-async def generate_key_test() -> dict[str, str]:
- """Generate a key for testing purposes. address should be 0x1eB5305647d0998C3373696629b2fE8E21eb10B9"""
- try:
- rofl_client = RoflClient()
- wallet = await rofl_client.get_wallet("test")
- return {"wallet": wallet.address}
- except PermissionError as pe:
- return {
- "error": f"ROFL service unavailable: {pe}",
- "suggestion": "Ensure ROFL daemon is running and socket is properly mounted",
- }
- except Exception as e:
- import traceback
-
- return {"error": str(e), "traceback": traceback.format_exc()}
-
-
-@routes.get("/migrations/status")
-async def migration_status() -> dict[str, Optional[str | bool]]:
- """Get database migration status."""
- from talos.database.session import get_database_url
-
- try:
- database_url = get_database_url()
- engine = create_engine(database_url)
- return check_migration_status(engine)
- except Exception as e:
- import traceback
-
- return {"error": str(e), "traceback": traceback.format_exc()}
-
-
-@routes.get("/tables")
-async def tables_in_database() -> dict[str, list[str] | str]:
- """Get list of tables in the database."""
- from sqlalchemy import inspect
-
- from talos.database.session import get_database_url
-
- try:
- database_url = get_database_url()
- engine = create_engine(database_url)
- inspector = inspect(engine)
- tables = inspector.get_table_names()
- return {"tables": tables}
- except Exception as e:
- import traceback
-
- return {"error": str(e), "traceback": traceback.format_exc()}
-
-
-@routes.get("/counter")
-async def get_counter() -> dict[str, int | str]:
- """Get counter."""
- with get_session() as session:
- counter = session.query(Counter).filter(Counter.name == "test").first()
- if counter:
- return {"value": counter.value}
- else:
- return {"value": 0}
-
-
-@routes.post("/counter")
-async def increment_counter() -> dict[str, int | str]:
- """Increment counter."""
- try:
- with get_session() as session:
- counter = session.query(Counter).filter(Counter.name == "test").first()
- if not counter:
- counter = Counter(name="test", value=0)
- session.add(counter)
- session.commit()
- session.refresh(counter)
- counter.value += 1
- session.commit()
- return {"value": counter.value}
- except Exception as e:
- import traceback
-
- return {"error": str(e), "traceback": traceback.format_exc()}
-
-
-# Scheduler Management Routes
-
-
-def _get_scheduler(request: Request) -> JobScheduler | None:
- """Get the scheduler instance from the app state."""
- return cast(JobScheduler | None, request.app.state.get_scheduler())
-
-
-@routes.get("/scheduler/status")
-async def scheduler_status(request: Request) -> dict[str, Any]:
- """Get scheduler status and information."""
- scheduler = _get_scheduler(request)
- if not scheduler:
- return {"error": "Scheduler not available"}
-
- return {"running": scheduler.is_running(), "timezone": scheduler.timezone, "job_count": len(scheduler.list_jobs())}
-
-
-@routes.get("/scheduler/jobs")
-async def list_scheduled_jobs(request: Request) -> dict[str, Any]:
- """List all scheduled jobs."""
- scheduler = _get_scheduler(request)
- if not scheduler:
- return {"error": "Scheduler not available"}
-
- jobs = scheduler.list_jobs()
- job_data = []
-
- for job in jobs:
- job_data.append(
- {
- "name": job.name,
- "description": job.description,
- "cron_expression": job.cron_expression,
- "execute_at": job.execute_at.isoformat() if job.execute_at else None,
- "enabled": job.enabled,
- "max_instances": job.max_instances,
- "is_recurring": job.is_recurring(),
- "is_one_time": job.is_one_time(),
- }
- )
-
- return {"jobs": job_data, "count": len(job_data)}
-
-
-@routes.get("/scheduler/jobs/{job_name}")
-async def get_scheduled_job(request: Request, job_name: str) -> dict[str, Any]:
- """Get a specific scheduled job by name."""
- scheduler = _get_scheduler(request)
- if not scheduler:
- return {"error": "Scheduler not available"}
-
- job = scheduler.get_job(job_name)
- if not job:
- return {"error": f"Job '{job_name}' not found"}
-
- return {
- "name": job.name,
- "description": job.description,
- "cron_expression": job.cron_expression,
- "execute_at": job.execute_at.isoformat() if job.execute_at else None,
- "enabled": job.enabled,
- "max_instances": job.max_instances,
- "is_recurring": job.is_recurring(),
- "is_one_time": job.is_one_time(),
- }
diff --git a/src/talos/server/routes/ohm_strategy.py b/src/talos/server/routes/ohm_strategy.py
deleted file mode 100644
index 245401bb..00000000
--- a/src/talos/server/routes/ohm_strategy.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from typing import Any
-
-from fastapi import APIRouter
-
-from talos.database.models import Swap
-from talos.database.session import get_session
-from talos.server.jobs import TwapOHMJob
-from talos.utils import RoflClient
-
-ohm_strategy_router = APIRouter(prefix="/ohm")
-
-
-@ohm_strategy_router.get("/")
-async def get_twap_ohm() -> dict[str, str]:
- """Get the twap ohm job."""
- return {"job": TwapOHMJob().name}
-
-
-@ohm_strategy_router.get("/wallet")
-async def get_twap_ohm_wallet() -> dict[str, str]:
- """Get the twap ohm wallet."""
- wallet = await RoflClient().get_wallet(TwapOHMJob.WALLET_ID)
- return {"wallet": wallet.address}
-
-
-@ohm_strategy_router.get("/swaps")
-async def get_twap_ohm_status() -> dict[str, list[dict[str, Any]]]:
- """Get all swaps"""
-
- with get_session() as session:
- swaps = session.query(Swap).filter(Swap.strategy_id == TwapOHMJob.STRATEGY_ID).all()
- return {"swaps": [swap.to_dict() for swap in swaps]}
diff --git a/src/talos/services/__init__.py b/src/talos/services/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/services/abstract/__init__.py b/src/talos/services/abstract/__init__.py
deleted file mode 100644
index f9b7d15a..00000000
--- a/src/talos/services/abstract/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from .gitbook import GitBook
-from .github import GitHub
-from .onchain_management import OnChainManagement
-from .proposal_agent import ProposalAgent
-from .twitter import Twitter
-
-__all__ = [
- "GitBook",
- "GitHub",
- "OnChainManagement",
- "ProposalAgent",
- "Twitter",
-]
diff --git a/src/talos/services/abstract/devin.py b/src/talos/services/abstract/devin.py
deleted file mode 100644
index 8e57bd64..00000000
--- a/src/talos/services/abstract/devin.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any, Dict, List
-
-from talos.services.abstract.service import Service
-
-
-class Devin(Service, ABC):
- """
- An abstract base class for a Devin service.
- Enables Talos to interact with Devin AI for session management.
- """
-
- @abstractmethod
- def get_all_sessions(self) -> List[Dict[str, Any]]:
- """
- Retrieves all sessions from Devin.
-
- Returns:
- List of session dictionaries containing session information.
- """
- pass
-
- @abstractmethod
- def get_session_info(self, session_id: str) -> Dict[str, Any]:
- """
- Retrieves detailed information about a specific session.
-
- Args:
- session_id: The ID of the session to retrieve information for.
-
- Returns:
- Dictionary containing detailed session information.
- """
- pass
-
- @abstractmethod
- def send_message_to_session(self, session_id: str, message: str) -> Dict[str, Any]:
- """
- Sends a message to an existing Devin session.
-
- Args:
- session_id: The ID of the session to send message to.
- message: The message to send.
-
- Returns:
- Dictionary containing the message response result.
- """
- pass
-
- @abstractmethod
- def create_session(self, description: str, **kwargs) -> Dict[str, Any]:
- """
- Creates a new Devin session.
-
- Args:
- description: The session description/task.
- **kwargs: Additional session parameters.
-
- Returns:
- Dictionary containing the created session information.
- """
- pass
diff --git a/src/talos/services/abstract/execution_planner.py b/src/talos/services/abstract/execution_planner.py
deleted file mode 100644
index d7610c1c..00000000
--- a/src/talos/services/abstract/execution_planner.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import annotations
-
-from abc import abstractmethod
-
-from talos.models.proposals import Plan, Question
-from talos.services.abstract.service import Service
-
-
-class ExecutionPlanner(Service):
- """
- Abstract base class for a service that generates execution plans.
- """
-
- @property
- def name(self) -> str:
- return "execution_planner"
-
- @abstractmethod
- def generate_plan(self, question: Question) -> Plan:
- """
- Generates a plan for execution based on a question and feedback.
- """
- pass
diff --git a/src/talos/services/abstract/gitbook.py b/src/talos/services/abstract/gitbook.py
deleted file mode 100644
index 4f9ef449..00000000
--- a/src/talos/services/abstract/gitbook.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from abc import ABC, abstractmethod
-
-
-class GitBook(ABC):
- @abstractmethod
- def create_page(self, title: str, content: str) -> None:
- pass
diff --git a/src/talos/services/abstract/github.py b/src/talos/services/abstract/github.py
deleted file mode 100644
index 4fb972fc..00000000
--- a/src/talos/services/abstract/github.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any
-
-from langchain_core.language_models import BaseLanguageModel
-
-from talos.services.abstract.service import Service
-
-
-class GitHub(Service, ABC):
- """
- An abstract base class for a GitHub discipline.
- """
-
- llm: BaseLanguageModel
- token: str | None = None
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
-
- @abstractmethod
- def reply_to_issues(self, user: str, project: str) -> None:
- """
- Replies to issues that are pending Talos feedback.
- """
- pass
-
- @abstractmethod
- def review_pull_requests(self, user: str, project: str) -> None:
- """
- Reviews pending pull requests to determine if they're ready for approval or not.
- """
- pass
-
- @abstractmethod
- def scan(self, user: str, project: str) -> str:
- """
- Reviews the code in a repository.
- """
- pass
-
- @abstractmethod
- def reference_code(self, user: str, project: str, query: str) -> str:
- """
- Looks at the directory structure and any files in the repository to answer a query.
- """
- pass
-
- @abstractmethod
- def update_summary(self, user: str, project: str) -> None:
- """
- Updates the SUMMARY.md for a repo to make it easier to review it.
- """
- pass
diff --git a/src/talos/services/abstract/models.py b/src/talos/services/abstract/models.py
deleted file mode 100644
index 39db4417..00000000
--- a/src/talos/services/abstract/models.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from typing import Optional
-
-from pydantic import BaseModel
-
-
-class Issue(BaseModel):
- number: int
- title: str
- url: str
-
-
-class Comment(BaseModel):
- user: str
- comment: str
- reply_to: Optional[str] = None
-
-
-class PullRequestFile(BaseModel):
- filename: str
diff --git a/src/talos/services/abstract/onchain_management.py b/src/talos/services/abstract/onchain_management.py
deleted file mode 100644
index ece7bd13..00000000
--- a/src/talos/services/abstract/onchain_management.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from abc import ABC, abstractmethod
-
-
-class OnChainManagement(ABC):
- """
- An abstract base class for on-chain management.
- """
-
- @abstractmethod
- def get_treasury_balance(self) -> float:
- """
- Gets the balance of the treasury.
- """
- pass
-
- @abstractmethod
- def add_to_vault(self, vault_address: str, token: str, amount: float) -> None:
- """
- Adds funds to a vault.
- """
- pass
-
- @abstractmethod
- def remove_from_vault(self, vault_address: str, amount: float) -> None:
- """
- Removes funds from a vault.
- """
- pass
-
- @abstractmethod
- def deploy_vault(self) -> str:
- """
- Deploys a new vault contract.
- """
- pass
-
- @abstractmethod
- def deploy_contract(self, bytecode: str, salt: str, chain_id: int, check_duplicates: bool = False) -> str:
- """
- Deploy a smart contract with optional duplicate checking.
- """
- pass
-
- @abstractmethod
- def check_deployment_duplicate(self, bytecode: str, salt: str, chain_id: int) -> bool:
- """
- Check if a contract deployment would be a duplicate.
- """
- pass
diff --git a/src/talos/services/abstract/proposal_agent.py b/src/talos/services/abstract/proposal_agent.py
deleted file mode 100644
index 1fbc38ea..00000000
--- a/src/talos/services/abstract/proposal_agent.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from __future__ import annotations
-
-from abc import abstractmethod
-from typing import Any
-
-from talos.models.proposals import Proposal, ProposalResponse
-from talos.services.abstract.service import Service
-
-
-class ProposalAgent(Service):
- """
- An abstract base class for an agent that can evaluate proposals.
- """
-
- rag_dataset: Any | None = None
- tools: list[Any] | None = None
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
-
- @property
- def name(self) -> str:
- return "proposals"
-
- @abstractmethod
- def evaluate_proposal(self, proposal: Proposal) -> ProposalResponse:
- """
- Evaluates a proposal and returns a recommendation.
-
- :param proposal: The proposal to evaluate.
- :return: The agent's recommendation.
- """
- pass
diff --git a/src/talos/services/abstract/service.py b/src/talos/services/abstract/service.py
deleted file mode 100644
index 2bef77ed..00000000
--- a/src/talos/services/abstract/service.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-
-from pydantic import BaseModel
-
-
-class Service(BaseModel, ABC):
- """
- An abstract base class for a service.
- Services are a way to organize and manage the agent's actions.
- They are LLM driven actions, which means that they are powered by a
- language model. This allows them to be more flexible and powerful
- than traditional tools.
- """
-
- @property
- @abstractmethod
- def name(self) -> str:
- """
- The name of the service.
- """
- pass
diff --git a/src/talos/services/abstract/talos_sentiment.py b/src/talos/services/abstract/talos_sentiment.py
deleted file mode 100644
index 3c3330ee..00000000
--- a/src/talos/services/abstract/talos_sentiment.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from abc import ABC, abstractmethod
-
-from talos.services.abstract.service import Service
-
-
-class TalosSentiment(Service, ABC):
- """
- An abstract base class for a Talos sentiment service.
- """
-
- @property
- @abstractmethod
- def name(self) -> str:
- """
- The name of the service.
- """
- pass
diff --git a/src/talos/services/abstract/twitter.py b/src/talos/services/abstract/twitter.py
deleted file mode 100644
index 6cb43bed..00000000
--- a/src/talos/services/abstract/twitter.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Any
-
-from talos.services.abstract.service import Service
-
-
-class Twitter(Service, ABC):
- """
- An abstract base class for a Twitter service.
- """
-
- @property
- @abstractmethod
- def name(self) -> str:
- """
- The name of the service.
- """
- pass
-
- @abstractmethod
- def get_user_timeline(self, username: str, **kwargs: Any) -> Any:
- pass
-
- @abstractmethod
- def get_user_mentions(self, username: str, **kwargs: Any) -> Any:
- pass
-
- @abstractmethod
- def get_tweet(self, tweet_id: str, **kwargs: Any) -> Any:
- pass
-
- @abstractmethod
- def search(self, query: str, **kwargs: Any) -> Any:
- pass
-
-
-class TwitterPersona(Service, ABC):
- """
- An abstract base class for a Twitter persona service.
- """
-
- @property
- @abstractmethod
- def name(self) -> str:
- """
- The name of the service.
- """
- pass
diff --git a/src/talos/services/abstract/yield_manager.py b/src/talos/services/abstract/yield_manager.py
deleted file mode 100644
index 1ccbd105..00000000
--- a/src/talos/services/abstract/yield_manager.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from abc import ABC, abstractmethod
-
-
-class YieldManager(ABC):
- @abstractmethod
- def update_staking_apr(self, sentiment: float, sentiment_report: str) -> float:
- pass
diff --git a/src/talos/services/implementations/__init__.py b/src/talos/services/implementations/__init__.py
deleted file mode 100644
index 9f461f72..00000000
--- a/src/talos/services/implementations/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from .onchain_management import OnChainManagementService
-from .proposals import ProposalsService
-from .yield_manager import YieldManagerService
-
-__all__ = [
- "OnChainManagementService",
- "ProposalsService",
- "YieldManagerService",
-]
diff --git a/src/talos/services/implementations/devin.py b/src/talos/services/implementations/devin.py
deleted file mode 100644
index a4a22698..00000000
--- a/src/talos/services/implementations/devin.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from __future__ import annotations
-
-import requests
-from typing import Any, Dict, List, Optional
-from pydantic import PrivateAttr
-
-from talos.services.abstract.devin import Devin
-
-
-class DevinService(Devin):
- """
- A service for interacting with Devin AI for session management.
- """
-
- api_base_url: str = "https://api.devin.ai"
- api_key: Optional[str] = None
- _session: requests.Session = PrivateAttr()
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- self._session = requests.Session()
- if self.api_key:
- self._session.headers.update({
- "Authorization": f"Bearer {self.api_key}",
- "Content-Type": "application/json"
- })
-
- @property
- def name(self) -> str:
- return "devin"
-
- def _make_request(self, method: str, endpoint: str, **kwargs) -> Dict[str, Any]:
- """
- Makes an HTTP request to the Devin API.
-
- Args:
- method: HTTP method (GET, POST, etc.)
- endpoint: API endpoint path
- **kwargs: Additional request parameters
-
- Returns:
- JSON response as dictionary
-
- Raises:
- Exception: If the request fails
- """
- url = f"{self.api_base_url.rstrip('/')}/{endpoint.lstrip('/')}"
-
- try:
- response = self._session.request(method, url, **kwargs)
- response.raise_for_status()
- return response.json()
- except requests.RequestException as e:
- raise Exception(f"Devin API request failed: {str(e)}")
-
- def get_all_sessions(self) -> List[Dict[str, Any]]:
- """
- Retrieves all sessions from Devin.
-
- Returns:
- List of session dictionaries containing session information.
- """
- result = self._make_request("GET", "/sessions")
- return result.get("sessions", [])
-
- def get_session_info(self, session_id: str) -> Dict[str, Any]:
- """
- Retrieves detailed information about a specific session.
-
- Args:
- session_id: The ID of the session to retrieve information for.
-
- Returns:
- Dictionary containing detailed session information.
- """
- return self._make_request("GET", f"/sessions/{session_id}")
-
- def send_message_to_session(self, session_id: str, message: str) -> Dict[str, Any]:
- """
- Sends a message to an existing Devin session.
-
- Args:
- session_id: The ID of the session to send message to.
- message: The message to send.
-
- Returns:
- Dictionary containing the message response result.
- """
- payload = {"message": message}
- return self._make_request("POST", f"/sessions/{session_id}/messages", json=payload)
-
- def create_session(self, description: str, **kwargs) -> Dict[str, Any]:
- """
- Creates a new Devin session.
-
- Args:
- description: The session description/task.
- **kwargs: Additional session parameters (idempotent, etc.)
-
- Returns:
- Dictionary containing the created session information.
- """
- payload = {"task": description, **kwargs}
- return self._make_request("POST", "/sessions", json=payload)
diff --git a/src/talos/services/implementations/github.py b/src/talos/services/implementations/github.py
deleted file mode 100644
index 101c45d6..00000000
--- a/src/talos/services/implementations/github.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from pydantic import BaseModel, PrivateAttr
-
-from talos.services.implementations.github_agent import GithubPRReviewAgent
-from talos.tools.github.tools import GithubTools
-
-
-class GithubService(BaseModel):
- """
- A service for reviewing Github pull requests.
- """
-
- _tools: GithubTools = PrivateAttr()
- _agent: GithubPRReviewAgent = PrivateAttr()
-
- token: str
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- self._tools = GithubTools(token=self.token)
- self._agent = GithubPRReviewAgent(token=self.token)
-
- @property
- def name(self) -> str:
- return "github_pr_review"
-
- def review_pr(self, user: str, repo: str, pr_number: int) -> str:
- diff = self._tools.get_pr_diff(user, repo, pr_number)
- comments = self._tools.get_pr_comments(user, repo, pr_number)
- files = self._tools.get_pr_files(user, repo, pr_number)
-
- input_str = f"Diff: {diff}\n\nComments: {comments}\n\nFiles: {files}"
- response = self._agent.run(input=input_str, user=user, project=repo)
- return response["output"]
diff --git a/src/talos/services/implementations/github_agent.py b/src/talos/services/implementations/github_agent.py
deleted file mode 100644
index fa602cb6..00000000
--- a/src/talos/services/implementations/github_agent.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import Any
-
-from langchain.agents import AgentExecutor, create_openai_tools_agent
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.tools import tool
-from langchain_openai import ChatOpenAI
-from pydantic import PrivateAttr
-from pydantic.types import SecretStr
-
-from talos.services.abstract.service import Service
-from talos.tools.github.tools import GithubTools
-
-
-class GithubPRReviewAgent(Service):
- """
- An agent that reviews a pull request and provides feedback.
- """
-
- token: str
- _agent_executor: AgentExecutor = PrivateAttr()
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- github_tools = GithubTools(token=self.token)
- tools = [
- tool(github_tools.get_project_structure),
- tool(github_tools.get_file_content),
- ]
- llm = ChatOpenAI(api_key=SecretStr(self.token))
- with open("src/talos/prompts/github_pr_review_prompt.json") as f:
- prompt_config = json.load(f)
- prompt = ChatPromptTemplate.from_messages(
- [
- (
- "system",
- prompt_config["template"],
- ),
- ("user", "{input}"),
- ("placeholder", "{agent_scratchpad}"),
- ]
- )
- agent = create_openai_tools_agent(llm, tools, prompt)
- self._agent_executor = AgentExecutor(agent=agent, tools=tools)
-
- @property
- def name(self) -> str:
- return "github_pr_review_agent"
-
- def run(self, **kwargs: Any) -> Any:
- return self._agent_executor.invoke(kwargs)
diff --git a/src/talos/services/implementations/onchain_management.py b/src/talos/services/implementations/onchain_management.py
deleted file mode 100644
index 653e80de..00000000
--- a/src/talos/services/implementations/onchain_management.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from talos.services.abstract.onchain_management import OnChainManagement
-from talos.services.implementations.talos_sentiment import TalosSentimentService
-from talos.services.implementations.yield_manager import YieldManagerService
-
-
-class OnChainManagementService(OnChainManagement):
- """
- A discipline for on-chain management.
- """
-
- def __init__(
- self,
- yield_manager: YieldManagerService,
- sentiment_service: "TalosSentimentService",
- ):
- self.yield_manager = yield_manager
- self.sentiment_service = sentiment_service
-
- def get_treasury_balance(self) -> float:
- """
- Gets the balance of the treasury.
- """
- return 1000.0
-
- def add_to_vault(self, vault_address: str, token: str, amount: float) -> None:
- """
- Adds funds to a vault.
- """
- print(f"Adding {amount} of {token} to vault {vault_address}")
-
- def remove_from_vault(self, vault_address: str, amount: float) -> None:
- """
- Removes funds from a vault.
- """
- print(f"Removing {amount} from vault {vault_address}")
-
- def deploy_vault(self) -> str:
- """
- Deploys a new vault contract.
- """
- return "0x1234567890"
-
- def set_staking_apr(self) -> None:
- """
- Sets the staking APR.
- """
- sentiment = self.sentiment_service.analyze_sentiment(search_query="talos")
- if sentiment.score is not None:
- new_apr = self.yield_manager.update_staking_apr(sentiment.score, "\n".join(sentiment.answers))
- print(f"Setting staking APR to {new_apr}")
-
- def deploy_contract(self, bytecode: str, salt: str, chain_id: int, check_duplicates: bool = False) -> str:
- """
- Deploy a smart contract with optional duplicate checking.
- """
- from talos.tools.contract_deployment import ContractDeploymentTool
-
- tool = ContractDeploymentTool()
- result = tool._run_unsupervised(
- bytecode=bytecode, salt=salt, chain_id=chain_id, check_duplicates=check_duplicates
- )
- return result.contract_address
-
- def check_deployment_duplicate(self, bytecode: str, salt: str, chain_id: int) -> bool:
- """
- Check if a contract deployment would be a duplicate.
- """
- from talos.database.models import ContractDeployment
- from talos.database.session import get_session
- from talos.utils.contract_deployment import calculate_contract_signature
-
- signature = calculate_contract_signature(bytecode, salt)
-
- with get_session() as session:
- existing = (
- session.query(ContractDeployment)
- .filter(ContractDeployment.contract_signature == signature, ContractDeployment.chain_id == chain_id)
- .first()
- )
-
- return existing is not None
diff --git a/src/talos/services/implementations/proposals/__init__.py b/src/talos/services/implementations/proposals/__init__.py
deleted file mode 100644
index 974b21ad..00000000
--- a/src/talos/services/implementations/proposals/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .proposals_service import ProposalsService
-
-__all__ = ["ProposalsService"]
diff --git a/src/talos/services/implementations/proposals/proposals_service.py b/src/talos/services/implementations/proposals/proposals_service.py
deleted file mode 100644
index 3c979bb5..00000000
--- a/src/talos/services/implementations/proposals/proposals_service.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from langchain_core.language_models import BaseLanguageModel
-from pydantic import ConfigDict
-
-from talos.models.proposals import Proposal, ProposalResponse
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.services.abstract.proposal_agent import ProposalAgent
-from talos.skills.proposals import ProposalsSkill
-
-
-class ProposalsService(ProposalAgent):
- """
- A service for evaluating proposals using the ProposalsSkill.
-
- This service acts as a bridge between the abstract ProposalAgent interface
- and the concrete ProposalsSkill implementation.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- llm: BaseLanguageModel
- prompt_manager: PromptManager | None = None
- _skill: ProposalsSkill | None = None
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
-
- if self.prompt_manager is None:
- self.prompt_manager = FilePromptManager("src/talos/prompts")
-
- self._skill = ProposalsSkill(
- llm=self.llm, prompt_manager=self.prompt_manager, rag_dataset=self.rag_dataset, tools=self.tools
- )
-
- def evaluate_proposal(self, proposal: Proposal) -> ProposalResponse:
- """
- Evaluates a proposal and returns a recommendation.
-
- :param proposal: The proposal to evaluate.
- :return: The agent's recommendation with confidence and reasoning.
- """
- if self._skill is None:
- raise RuntimeError("ProposalsSkill not initialized")
-
- return self._skill.evaluate_proposal(proposal)
diff --git a/src/talos/services/implementations/talos_sentiment.py b/src/talos/services/implementations/talos_sentiment.py
deleted file mode 100644
index 4c55edd8..00000000
--- a/src/talos/services/implementations/talos_sentiment.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import json
-from datetime import datetime, timezone
-from typing import Optional
-
-from pydantic import ConfigDict
-
-from talos.models.services import TwitterSentimentResponse
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.services.abstract.service import Service
-from talos.tools.twitter_client import TweepyClient
-from talos.utils.llm import LLMClient
-
-
-class TalosSentimentService(Service):
- """
- A service for analyzing the sentiment of tweets about Talos.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
- prompt_manager: FilePromptManager
- twitter_client: TweepyClient
- llm_client: LLMClient
-
- @property
- def name(self) -> str:
- return "talos_sentiment"
-
- def analyze_sentiment(
- self, search_query: str = "talos", start_time: Optional[str] = None
- ) -> TwitterSentimentResponse:
- sentiment_prompt_obj: Prompt | None = self.prompt_manager.get_prompt("talos_sentiment_single_prompt")
- if sentiment_prompt_obj is None:
- raise ValueError("Sentiment prompt not found")
- sentiment_prompt = sentiment_prompt_obj.template
- tweets = self.twitter_client.search_tweets(search_query, start_time=start_time)
-
- if not tweets or not tweets.data:
- return TwitterSentimentResponse(answers=["No tweets found for the given query."], score=None)
-
- users = {user["id"]: user for user in tweets.includes.get("users", [])} if tweets.includes else {}
-
- tweet_data = []
- for tweet in tweets.data:
- author_id = tweet.author_id
- author = users.get(author_id, {})
-
- followers = author.get("public_metrics", {}).get("followers_count", 1)
- total_engagement = (
- tweet.public_metrics.get("like_count", 0)
- + tweet.public_metrics.get("retweet_count", 0)
- + tweet.public_metrics.get("reply_count", 0)
- + tweet.public_metrics.get("quote_count", 0)
- )
- engagement_rate = total_engagement / max(followers, 1) * 100
-
- tweet_data.append(
- {
- "text": tweet.text,
- "author": author.get("username", "unknown"),
- "followers": followers,
- "likes": tweet.public_metrics.get("like_count", 0),
- "retweets": tweet.public_metrics.get("retweet_count", 0),
- "replies": tweet.public_metrics.get("reply_count", 0),
- "quotes": tweet.public_metrics.get("quote_count", 0),
- "total_engagement": total_engagement,
- "engagement_rate": round(engagement_rate, 2),
- "age_in_days": (
- datetime.now(timezone.utc) - datetime.fromisoformat(tweet.created_at.replace("Z", "+00:00"))
- ).days,
- }
- )
- prompt = sentiment_prompt.format(tweets=json.dumps(tweet_data))
- response = self.llm_client.reasoning(prompt)
- try:
- response_data = json.loads(response)
- score = response_data["score"]
- report = response_data["report"]
- except (json.JSONDecodeError, KeyError):
- return TwitterSentimentResponse(answers=["Could not analyze the sentiment of any tweets."], score=None)
-
- return TwitterSentimentResponse(answers=[report], score=score)
diff --git a/src/talos/services/implementations/yield_manager.py b/src/talos/services/implementations/yield_manager.py
deleted file mode 100644
index 69622edb..00000000
--- a/src/talos/services/implementations/yield_manager.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import json
-import logging
-
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.services.abstract.yield_manager import YieldManager
-from talos.tools.twitter_client import TweepyClient
-from talos.utils.dexscreener import DexscreenerClient
-from talos.utils.geckoterminal import GeckoTerminalClient
-from talos.utils.llm import LLMClient
-
-
-class YieldManagerService(YieldManager):
- def __init__(
- self,
- dexscreener_client: DexscreenerClient,
- gecko_terminal_client: GeckoTerminalClient,
- llm_client: LLMClient,
- prompt_name: str = "yield_management",
- min_yield: float = 0.01,
- max_yield: float = 0.20,
- ):
- if min_yield <= 0 or max_yield <= 0:
- raise ValueError("Min and max yield must be positive")
- if min_yield >= max_yield:
- raise ValueError("Min yield must be less than max yield")
-
- self.dexscreener_client = dexscreener_client
- self.gecko_terminal_client = gecko_terminal_client
- self.llm_client = llm_client
- self.min_yield = min_yield
- self.max_yield = max_yield
- self.twitter_client = TweepyClient()
- self.prompt_manager = FilePromptManager("src/talos/prompts")
- self.prompt = self.prompt_manager.get_prompt(prompt_name)
-
- def update_staking_apr(self, sentiment: float, sentiment_report: str) -> float:
- logging.info("Updating staking APR...")
- dexscreener_data = self.dexscreener_client.get_talos_data()
- logging.info(f"Dexscreener data: {dexscreener_data}")
-
- logging.info(f"Social media sentiment: {sentiment}")
- logging.info(f"Sentiment report: {sentiment_report}")
-
- staked_supply_percentage = self.get_staked_supply_percentage()
- logging.info(f"Staked supply percentage: {staked_supply_percentage}")
-
- ohlcv_data = self.gecko_terminal_client.get_ohlcv_data()
- logging.info(f"GeckoTerminal OHLCV data: {ohlcv_data}")
-
- data_scores = self._calculate_data_source_scores(
- dexscreener_data, ohlcv_data, sentiment, staked_supply_percentage
- )
- logging.info(f"Data source scores: {data_scores}")
-
- weighted_apr = self._calculate_weighted_apr_recommendation(data_scores)
- logging.info(f"Weighted APR recommendation: {weighted_apr}")
-
- if not self.prompt:
- raise ValueError("Prompt not found")
-
- prompt = self.prompt.format(
- price=dexscreener_data.price_usd,
- change=dexscreener_data.price_change_h24,
- volume=dexscreener_data.volume_h24,
- sentiment=sentiment,
- staked_supply_percentage=staked_supply_percentage,
- ohlcv_data=ohlcv_data.model_dump_json(),
- )
-
- enhanced_prompt = f"{prompt}\n\nBased on weighted analysis of the data sources, the recommended APR is {weighted_apr:.4f}. Please consider this recommendation along with the raw data. The APR must be between {self.min_yield} and {self.max_yield}."
-
- response = self.llm_client.reasoning(enhanced_prompt, web_search=True)
- try:
- response_json = json.loads(response)
- llm_apr = response_json["apr"]
- explanation = response_json["explanation"]
- logging.info(f"LLM explanation: {explanation}")
- except (json.JSONDecodeError, KeyError) as e:
- logging.error(f"Failed to parse LLM response: {e}")
- logging.info("Using weighted APR recommendation as fallback")
- return max(self.min_yield, min(self.max_yield, weighted_apr))
-
- final_apr = max(self.min_yield, min(self.max_yield, llm_apr))
-
- if final_apr != llm_apr:
- logging.info(f"APR bounded from {llm_apr} to {final_apr} (min: {self.min_yield}, max: {self.max_yield})")
-
- return final_apr
-
- def get_staked_supply_percentage(self) -> float:
- return 0.45
-
- def _calculate_data_source_scores(self, dexscreener_data, ohlcv_data, sentiment: float, staked_supply_percentage: float) -> dict:
- scores = {}
-
- price_change = dexscreener_data.price_change_h24
- if price_change > 0.1:
- scores['price_trend'] = 0.8
- elif price_change > 0.05:
- scores['price_trend'] = 0.6
- elif price_change > -0.05:
- scores['price_trend'] = 0.5
- elif price_change > -0.1:
- scores['price_trend'] = 0.3
- else:
- scores['price_trend'] = 0.1
-
- volume = dexscreener_data.volume_h24
- if volume > 1000000:
- scores['volume_confidence'] = 0.8
- elif volume > 500000:
- scores['volume_confidence'] = 0.6
- elif volume > 100000:
- scores['volume_confidence'] = 0.4
- else:
- scores['volume_confidence'] = 0.2
-
- scores['sentiment'] = max(0.0, min(1.0, sentiment / 100.0))
-
- if staked_supply_percentage > 0.8:
- scores['supply_pressure'] = 0.2
- elif staked_supply_percentage > 0.6:
- scores['supply_pressure'] = 0.4
- elif staked_supply_percentage > 0.4:
- scores['supply_pressure'] = 0.6
- elif staked_supply_percentage > 0.2:
- scores['supply_pressure'] = 0.8
- else:
- scores['supply_pressure'] = 1.0
-
- if ohlcv_data.ohlcv_list:
- recent_ohlcv = ohlcv_data.ohlcv_list[-5:]
- if len(recent_ohlcv) >= 2:
- price_range = max(item.high for item in recent_ohlcv) - min(item.low for item in recent_ohlcv)
- avg_price = sum(item.close for item in recent_ohlcv) / len(recent_ohlcv)
- volatility = price_range / avg_price if avg_price > 0 else 0
-
- if volatility > 0.2:
- scores['volatility'] = 0.3
- elif volatility > 0.1:
- scores['volatility'] = 0.5
- else:
- scores['volatility'] = 0.7
- else:
- scores['volatility'] = 0.5
- else:
- scores['volatility'] = 0.5
-
- return scores
-
- def _calculate_weighted_apr_recommendation(self, scores: dict) -> float:
- weights = {
- 'price_trend': 0.25,
- 'volume_confidence': 0.15,
- 'sentiment': 0.20,
- 'supply_pressure': 0.25,
- 'volatility': 0.15
- }
-
- weighted_score = sum(scores[factor] * weights[factor] for factor in weights.keys())
-
- apr_recommendation = self.min_yield + (weighted_score * (self.max_yield - self.min_yield))
-
- return apr_recommendation
diff --git a/src/talos/services/key_management.py b/src/talos/services/key_management.py
deleted file mode 100644
index ef55be76..00000000
--- a/src/talos/services/key_management.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os
-
-from nacl.public import PrivateKey, PublicKey, SealedBox
-
-
-class KeyManagement:
- def __init__(self, key_dir: str = ".keys"):
- self.key_dir = key_dir
- self.private_key_path = os.path.join(self.key_dir, "private_key.pem")
- self.public_key_path = os.path.join(self.key_dir, "public_key.pem")
- if not os.path.exists(self.key_dir):
- os.makedirs(self.key_dir)
-
- def generate_keys(self):
- """
- Generates a new Curve25519 key pair and saves them to the key_dir.
- """
- private_key = PrivateKey.generate()
- public_key = private_key.public_key
-
- with open(self.private_key_path, "wb") as f:
- f.write(private_key.encode())
-
- with open(self.public_key_path, "wb") as f:
- f.write(public_key.encode())
-
- def get_public_key(self) -> bytes:
- """
- Returns the public key as bytes.
- """
- if not os.path.exists(self.public_key_path):
- self.generate_keys()
- with open(self.public_key_path, "rb") as f:
- return f.read()
-
- def get_private_key(self) -> bytes:
- """
- Returns the private key as bytes.
- """
- if not os.path.exists(self.private_key_path):
- self.generate_keys()
- with open(self.private_key_path, "rb") as f:
- return f.read()
-
- def encrypt(self, data: str, public_key_bytes: bytes) -> bytes:
- """
- Encrypts data using the public key.
- """
- public_key = PublicKey(public_key_bytes)
- sealed_box = SealedBox(public_key)
- return sealed_box.encrypt(data.encode())
-
- def decrypt(self, encrypted_data: bytes) -> str:
- """
- Decrypts data using the private key.
- """
- private_key = PrivateKey(self.get_private_key())
- unseal_box = SealedBox(private_key)
- return unseal_box.decrypt(encrypted_data).decode()
diff --git a/src/talos/settings.py b/src/talos/settings.py
deleted file mode 100644
index 57afaca7..00000000
--- a/src/talos/settings.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from typing import Optional
-import logging
-from pydantic import model_validator
-from pydantic_settings import BaseSettings
-
-logger = logging.getLogger(__name__)
-
-
-class GitHubSettings(BaseSettings):
- GITHUB_API_TOKEN: Optional[str] = None
-
- @model_validator(mode="after")
- def validate_github_token(self):
- if not self.GITHUB_API_TOKEN:
- raise ValueError("GITHUB_API_TOKEN environment variable is required but not set")
-
- from .utils.validation import validate_api_token_format, mask_sensitive_data
- if not validate_api_token_format(self.GITHUB_API_TOKEN, 'github'):
- logger.warning("GitHub API token format appears invalid")
-
- masked_token = mask_sensitive_data(self.GITHUB_API_TOKEN)
- logger.info(f"GitHub settings initialized with token: {masked_token}")
- return self
-
-
-class OpenAISettings(BaseSettings):
- OPENAI_API_KEY: Optional[str] = None
-
- @model_validator(mode="after")
- def validate_openai_key(self):
- if not self.OPENAI_API_KEY:
- raise ValueError("OPENAI_API_KEY environment variable is required but not set")
-
- from .utils.validation import validate_api_token_format, mask_sensitive_data
- if not validate_api_token_format(self.OPENAI_API_KEY, 'openai'):
- logger.warning("OpenAI API key format appears invalid")
-
- masked_key = mask_sensitive_data(self.OPENAI_API_KEY)
- logger.info(f"OpenAI settings initialized with key: {masked_key}")
- return self
-
-
-class PerspectiveSettings(BaseSettings):
- PERSPECTIVE_API_KEY: Optional[str] = None
-
-
-class GitBookSettings(BaseSettings):
- GITBOOK_API_KEY: Optional[str] = None
-
- @model_validator(mode="after")
- def validate_gitbook_key(self):
- if not self.GITBOOK_API_KEY:
- raise ValueError("GITBOOK_API_KEY environment variable is required but not set")
-
- from .utils.validation import mask_sensitive_data
- masked_key = mask_sensitive_data(self.GITBOOK_API_KEY)
- logger.info(f"GitBook settings initialized with key: {masked_key}")
- return self
-
-
-class TwitterOAuthSettings(BaseSettings):
- TWITTER_CONSUMER_KEY: Optional[str] = None
- TWITTER_CONSUMER_SECRET: Optional[str] = None
- TWITTER_ACCESS_TOKEN: Optional[str] = None
- TWITTER_ACCESS_TOKEN_SECRET: Optional[str] = None
-
- @model_validator(mode="after")
- def validate_twitter_oauth(self):
- required_fields = [self.TWITTER_CONSUMER_KEY, self.TWITTER_CONSUMER_SECRET,
- self.TWITTER_ACCESS_TOKEN, self.TWITTER_ACCESS_TOKEN_SECRET]
- if not all(required_fields):
- raise ValueError("All Twitter OAuth environment variables are required: TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET")
-
- from .utils.validation import mask_sensitive_data
- logger.info(f"Twitter OAuth settings initialized with consumer key: {mask_sensitive_data(self.TWITTER_CONSUMER_KEY)}")
- return self
diff --git a/src/talos/skills/base.py b/src/talos/skills/base.py
deleted file mode 100644
index fefcfb7b..00000000
--- a/src/talos/skills/base.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from __future__ import annotations
-
-import threading
-import time
-import uuid
-from abc import ABC, abstractmethod
-from typing import Any, Dict
-
-from langchain_core.tools import tool
-from pydantic import BaseModel, PrivateAttr
-
-from talos.models.services import Ticket, TicketCreationRequest, TicketResult, TicketStatus
-
-
-class Skill(BaseModel, ABC):
- """
- An abstract base class for a skill.
- Skills are a way to organize and manage the agent's actions.
- They are LLM driven actions, which means that they are powered by a
- language model. This allows them to be more flexible and powerful
- than traditional tools.
- """
-
- _tickets: Dict[str, Ticket] = PrivateAttr(default_factory=dict)
- _results: Dict[str, TicketResult] = PrivateAttr(default_factory=dict)
- _threads: Dict[str, threading.Thread] = PrivateAttr(default_factory=dict)
-
- def model_post_init(self, __context: Any) -> None:
- pass
-
- @property
- @abstractmethod
- def name(self) -> str:
- """
- The name of the skill.
- """
- pass
-
- @abstractmethod
- def run(self, **kwargs: Any) -> Any:
- """
- Runs the skill.
- """
- pass
-
- def create_ticket_tool(self):
- @tool(f"create_{self.name}_ticket")
- def create_ticket(**kwargs: Any) -> Ticket:
- """
- Creates a ticket for the {self.name} skill.
-
- Args:
- **kwargs: The arguments to pass to the {self.name} skill.
-
- Returns:
- The ticket object.
- """
- request = TicketCreationRequest(tool=self.name, tool_args=kwargs)
- ticket_id = str(uuid.uuid4())
- ticket = Ticket(
- ticket_id=ticket_id,
- status=TicketStatus.PENDING,
- created_at=str(time.time()),
- updated_at=str(time.time()),
- request=request,
- )
- self._tickets[ticket_id] = ticket
- thread = threading.Thread(target=self._run_in_background, args=(ticket_id, request.tool_args))
- self._threads[ticket_id] = thread
- thread.start()
- return ticket
-
- return create_ticket
-
- def _run_in_background(self, ticket_id: str, tool_args: Dict[str, Any]) -> None:
- """
- Runs the skill in the background.
- """
- self._tickets[ticket_id].status = TicketStatus.RUNNING
- self._tickets[ticket_id].updated_at = str(time.time())
- try:
- result = self.run(**tool_args)
- self._results[ticket_id] = TicketResult(
- ticket_id=ticket_id,
- status=TicketStatus.COMPLETED,
- result=result,
- error=None,
- )
- self._tickets[ticket_id].status = TicketStatus.COMPLETED
- except Exception as e:
- self._results[ticket_id] = TicketResult(
- ticket_id=ticket_id,
- status=TicketStatus.FAILED,
- result=None,
- error=str(e),
- )
- self._tickets[ticket_id].status = TicketStatus.FAILED
- finally:
- self._tickets[ticket_id].updated_at = str(time.time())
-
- def get_ticket_status(self, ticket_id: str) -> Ticket | None:
- """
- Checks on the status of a ticket number with the skill.
- """
- return self._tickets.get(ticket_id)
-
- def cancel_ticket(self, ticket_id: str) -> Ticket | None:
- """
- Cancels the execution of a ticket number.
- """
- if ticket_id in self._threads:
- # Note: This is a simplistic way to "cancel" a thread.
- # It doesn't actually stop the thread, but it does prevent
- # the result from being stored.
- self._threads[ticket_id].join(timeout=0.1)
- if self._threads[ticket_id].is_alive():
- # If the thread is still alive, we can't do much more
- # to stop it without more complex mechanisms.
- pass
- del self._threads[ticket_id]
- self._tickets[ticket_id].status = TicketStatus.CANCELLED
- self._tickets[ticket_id].updated_at = str(time.time())
- return self._tickets[ticket_id]
- return None
-
- def get_ticket_result(self, ticket_id: str) -> TicketResult | None:
- """
- If it is finalized, to get the result of the execution.
- """
- return self._results.get(ticket_id)
-
- def get_all_tickets(self) -> list[Ticket]:
- """
- Returns all tickets for this skill.
- """
- return list(self._tickets.values())
diff --git a/src/talos/skills/codebase_evaluation.py b/src/talos/skills/codebase_evaluation.py
deleted file mode 100644
index b998cc5f..00000000
--- a/src/talos/skills/codebase_evaluation.py
+++ /dev/null
@@ -1,221 +0,0 @@
-from __future__ import annotations
-
-import os
-from pathlib import Path
-from typing import Any
-
-from langchain_core.language_models import BaseLanguageModel
-from langchain_core.prompts import PromptTemplate
-from pydantic import ConfigDict, Field
-
-from talos.models.proposals import QueryResponse
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-from talos.tools.github.tools import GithubTools
-
-
-class CodebaseEvaluationSkill(Skill):
- """
- A skill for evaluating codebase quality and suggesting improvements.
-
- This skill analyzes repository structure, code patterns, documentation,
- and other quality metrics to provide actionable improvement recommendations.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- llm: BaseLanguageModel
- prompt_manager: PromptManager = Field(default_factory=lambda: FilePromptManager("src/talos/prompts"))
- github_tools: GithubTools | None = None
-
- @property
- def name(self) -> str:
- return "codebase_evaluation_skill"
-
- def run(self, **kwargs: Any) -> QueryResponse:
- """
- Evaluates a codebase and returns improvement recommendations.
-
- Args:
- repo_path: Local path to repository (optional)
- github_user: GitHub username (optional, requires github_project)
- github_project: GitHub project name (optional, requires github_user)
-
- Returns:
- QueryResponse with evaluation results and recommendations
- """
- repo_path = kwargs.get("repo_path")
- github_user = kwargs.get("github_user")
- github_project = kwargs.get("github_project")
-
- if not repo_path and not (github_user and github_project):
- raise ValueError("Must provide either repo_path or both github_user and github_project")
-
- if github_user and github_project:
- return self._evaluate_github_repo(github_user, github_project)
- elif repo_path:
- return self._evaluate_local_repo(repo_path)
- else:
- raise ValueError("Must provide either repo_path or both github_user and github_project")
-
- def _evaluate_github_repo(self, user: str, project: str) -> QueryResponse:
- """Evaluate a GitHub repository."""
- if not self.github_tools:
- try:
- from talos.settings import GitHubSettings
- github_settings = GitHubSettings()
- if github_settings.GITHUB_API_TOKEN:
- self.github_tools = GithubTools(token=github_settings.GITHUB_API_TOKEN)
- else:
- raise ValueError("GitHub API token not available")
- except Exception:
- raise ValueError("GitHub tools not configured and cannot be initialized")
-
- structure = self._analyze_github_structure(user, project)
- key_files = self._get_key_files_content(user, project)
- evaluation = self._generate_evaluation(structure, key_files, f"{user}/{project}")
-
- return QueryResponse(answers=[evaluation])
-
- def _evaluate_local_repo(self, repo_path: str) -> QueryResponse:
- """Evaluate a local repository."""
- if not os.path.exists(repo_path):
- raise ValueError(f"Repository path does not exist: {repo_path}")
-
- structure = self._analyze_local_structure(repo_path)
- key_files = self._get_local_files_content(repo_path)
- evaluation = self._generate_evaluation(structure, key_files, repo_path)
-
- return QueryResponse(answers=[evaluation])
-
- def _analyze_github_structure(self, user: str, project: str) -> dict[str, Any]:
- """Analyze GitHub repository structure."""
- if not self.github_tools:
- return {"error": "GitHub tools not available"}
-
- try:
- root_contents = self.github_tools.get_project_structure(user, project)
-
- structure = {
- "total_files": len(root_contents),
- "directories": [f for f in root_contents if "." not in f.split("/")[-1]],
- "files": [f for f in root_contents if "." in f.split("/")[-1]],
- "has_readme": any("readme" in f.lower() for f in root_contents),
- "has_tests": any("test" in f.lower() for f in root_contents),
- "has_docs": any("doc" in f.lower() for f in root_contents),
- "config_files": [f for f in root_contents if f.split("/")[-1] in [
- "package.json", "requirements.txt", "Cargo.toml", "go.mod",
- "pom.xml", "build.gradle", "Makefile", "pyproject.toml"
- ]]
- }
-
- return structure
- except Exception as e:
- return {"error": f"Failed to analyze structure: {str(e)}"}
-
- def _analyze_local_structure(self, repo_path: str) -> dict[str, Any]:
- """Analyze local repository structure."""
- repo = Path(repo_path)
- all_files = list(repo.rglob("*"))
-
- structure = {
- "total_files": len([f for f in all_files if f.is_file()]),
- "directories": [str(f.relative_to(repo)) for f in all_files if f.is_dir()],
- "files": [str(f.relative_to(repo)) for f in all_files if f.is_file()],
- "has_readme": any("readme" in f.name.lower() for f in all_files),
- "has_tests": any("test" in str(f).lower() for f in all_files),
- "has_docs": any("doc" in str(f).lower() for f in all_files),
- "config_files": [str(f.relative_to(repo)) for f in all_files
- if f.name in ["package.json", "requirements.txt", "Cargo.toml",
- "go.mod", "pom.xml", "build.gradle", "Makefile", "pyproject.toml"]]
- }
-
- return structure
-
- def _get_key_files_content(self, user: str, project: str) -> dict[str, str]:
- """Get content of key files from GitHub repository."""
- key_files: dict[str, str] = {}
-
- if not self.github_tools:
- return key_files
-
- for readme_name in ["README.md", "README.rst", "README.txt", "readme.md"]:
- try:
- content = self.github_tools.get_file_content(user, project, readme_name)
- key_files["readme"] = content[:2000]
- break
- except Exception:
- continue
-
- try:
- structure = self.github_tools.get_project_structure(user, project)
- source_files = [f for f in structure if f.endswith(('.py', '.js', '.ts', '.java', '.go', '.rs'))][:5]
-
- for file_path in source_files:
- try:
- content = self.github_tools.get_file_content(user, project, file_path)
- key_files[file_path] = content[:1000]
- except Exception:
- continue
- except Exception:
- pass
-
- return key_files
-
- def _get_local_files_content(self, repo_path: str) -> dict[str, str]:
- """Get content of key files from local repository."""
- key_files: dict[str, str] = {}
- repo = Path(repo_path)
-
- for readme_file in repo.glob("README*"):
- try:
- content = readme_file.read_text(encoding='utf-8')
- key_files["readme"] = content[:2000]
- break
- except Exception:
- continue
-
- source_patterns = ["**/*.py", "**/*.js", "**/*.ts", "**/*.java", "**/*.go", "**/*.rs"]
- source_files = []
-
- for pattern in source_patterns:
- source_files.extend(list(repo.glob(pattern))[:2])
- if len(source_files) >= 5:
- break
-
- for file_path in source_files[:5]:
- try:
- content = file_path.read_text(encoding='utf-8')
- key_files[str(file_path.relative_to(repo))] = content[:1000]
- except Exception:
- continue
-
- return key_files
-
- def _generate_evaluation(self, structure: dict, key_files: dict, repo_identifier: str) -> str:
- """Generate codebase evaluation using LLM."""
- prompt = self.prompt_manager.get_prompt("codebase_evaluation_prompt")
- if not prompt:
- raise ValueError("Could not find prompt 'codebase_evaluation_prompt'")
-
- analysis_data = {
- "repo_identifier": repo_identifier,
- "structure": structure,
- "key_files": key_files,
- "file_count": structure.get("total_files", 0),
- "has_readme": structure.get("has_readme", False),
- "has_tests": structure.get("has_tests", False),
- "has_docs": structure.get("has_docs", False),
- "config_files": structure.get("config_files", [])
- }
-
- prompt_template = PromptTemplate(
- template=prompt.template,
- input_variables=prompt.input_variables,
- )
-
- chain = prompt_template | self.llm
- response = chain.invoke(analysis_data)
-
- return response.content
diff --git a/src/talos/skills/codebase_implementation.py b/src/talos/skills/codebase_implementation.py
deleted file mode 100644
index a910d3e6..00000000
--- a/src/talos/skills/codebase_implementation.py
+++ /dev/null
@@ -1,589 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import time
-from typing import Any, Dict, List, Optional, TypedDict
-
-from langchain_core.language_models import BaseLanguageModel
-from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
-from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import END, START, StateGraph
-from pydantic import ConfigDict
-
-from talos.prompts.prompt_manager import PromptManager
-from talos.services.implementations.devin import DevinService
-from talos.skills.base import Skill
-from talos.tools.document_loader import DatasetSearchTool, DocumentLoaderTool
-from talos.tools.github.tools import GithubTools
-
-
-class CodebaseImplementationState(TypedDict):
- """State that flows through the codebase implementation workflow."""
-
- messages: List[BaseMessage]
- original_request: str
- repository_url: Optional[str]
- additional_context: str
- technology_stack: Optional[str]
- gathered_info: Dict[str, Any]
- tool_documentation: Dict[str, Any]
- plan: Dict[str, Any]
- user_approval: Optional[bool]
- user_feedback: Optional[str]
- task_breakdown: List[Dict[str, Any]]
- devin_session_id: Optional[str]
- progress_updates: List[Dict[str, Any]]
- final_result: Dict[str, Any]
- metadata: Dict[str, Any]
-
-
-class CodebaseImplementationSkill(Skill):
- model_config = ConfigDict(arbitrary_types_allowed=True)
- """
- A skill for implementing codebases using LangGraph workflow orchestration and Devin integration.
-
- This skill orchestrates a multi-step workflow:
- 1. Information gathering from GitHub/internet
- 2. Plan creation using reasoning
- 3. User approval process
- 4. Task breakdown into implementable steps
- 5. Devin execution and monitoring
- """
-
- model_config = {"arbitrary_types_allowed": True}
-
- llm: BaseLanguageModel
- prompt_manager: PromptManager
- devin_service: Optional[DevinService] = None
- github_tools: Optional[GithubTools] = None
- document_loader: Optional[DocumentLoaderTool] = None
- dataset_search: Optional[DatasetSearchTool] = None
-
- _graph: Optional[StateGraph] = None
- _compiled_graph: Optional[Any] = None
- _checkpointer: Optional[MemorySaver] = None
-
- def model_post_init(self, __context: Any) -> None:
- super().model_post_init(__context)
- try:
- if not self.document_loader:
- from talos.data.dataset_manager import DatasetManager
-
- dataset_manager = DatasetManager(verbose=False)
- self.document_loader = DocumentLoaderTool(dataset_manager)
- if not self.dataset_search:
- from talos.data.dataset_manager import DatasetManager
-
- dataset_manager = DatasetManager(verbose=False)
- self.dataset_search = DatasetSearchTool(dataset_manager)
- except Exception:
- pass
- self._setup_workflow()
-
- @property
- def name(self) -> str:
- return "codebase_implementation"
-
- def _setup_workflow(self) -> None:
- """Initialize the LangGraph StateGraph workflow."""
- self._checkpointer = MemorySaver()
- self._graph = StateGraph(CodebaseImplementationState)
-
- self._graph.add_node("information_gatherer", self._gather_information)
- self._graph.add_node("tool_documentation_analyzer", self._analyze_tool_documentation)
- self._graph.add_node("plan_creator", self._create_plan)
- self._graph.add_node("user_approval_handler", self._handle_user_approval)
- self._graph.add_node("task_breakdown", self._breakdown_tasks)
- self._graph.add_node("devin_executor", self._execute_with_devin)
- self._graph.add_node("progress_monitor", self._monitor_progress)
-
- self._graph.add_edge(START, "information_gatherer")
- self._graph.add_edge("information_gatherer", "tool_documentation_analyzer")
- self._graph.add_edge("tool_documentation_analyzer", "plan_creator")
- self._graph.add_edge("plan_creator", "user_approval_handler")
-
- self._graph.add_conditional_edges(
- "user_approval_handler",
- self._determine_approval_path,
- {"approved": "task_breakdown", "rejected": "plan_creator", "pending": END},
- )
-
- self._graph.add_edge("task_breakdown", "devin_executor")
- self._graph.add_edge("devin_executor", "progress_monitor")
- self._graph.add_edge("progress_monitor", END)
-
- self._compiled_graph = self._graph.compile(checkpointer=self._checkpointer)
-
- async def _gather_information(self, state: CodebaseImplementationState) -> CodebaseImplementationState:
- """Step 1: Gather information from GitHub and other sources."""
- repository_url = state.get("repository_url")
- request = state["original_request"]
- technology_stack = state.get("technology_stack", "")
-
- info_prompt = f"""
- You are an expert software architect tasked with gathering information for implementing: "{request}"
-
- Repository URL: {repository_url or "Not provided"}
- Technology Stack Requirements: {technology_stack or "Not specified"}
-
- Analyze and gather the following information:
- 1. If repository URL is provided, analyze the codebase structure, technologies used, and existing patterns
- 2. Identify key requirements from the implementation request
- 3. Extract technology stack from repository or use provided stack requirements
- 4. Research relevant technologies, frameworks, and best practices
- 5. Consider potential challenges and dependencies
- 6. Identify tools and libraries that will need documentation analysis
-
- Provide a comprehensive information summary that will inform the implementation plan.
- Focus on identifying specific tools, frameworks, and technologies that will be used.
- """
-
- gathered_info = {}
- identified_tools = []
-
- if repository_url and self.github_tools:
- try:
- repo_parts = repository_url.replace("https://github.com/", "").split("/")
- if len(repo_parts) >= 2:
- owner, repo = repo_parts[0], repo_parts[1]
-
- repo_info = {"owner": owner, "name": repo}
- file_structure = self.github_tools.get_project_structure(owner, repo)
- readme_content = self.github_tools.get_file_content(owner, repo, "README.md")
-
- # Try to get package files to identify dependencies
- package_files = []
- for filename in ["package.json", "requirements.txt", "Cargo.toml", "go.mod", "pom.xml"]:
- try:
- content = self.github_tools.get_file_content(owner, repo, filename)
- package_files.append({"filename": filename, "content": content})
- except Exception:
- continue
-
- gathered_info["repository"] = {
- "info": repo_info,
- "structure": file_structure,
- "readme": readme_content,
- "package_files": package_files,
- }
-
- for package_file in package_files:
- if package_file["filename"] == "package.json":
- identified_tools.extend(["npm", "node.js", "javascript"])
- elif package_file["filename"] == "requirements.txt":
- identified_tools.extend(["pip", "python"])
- elif package_file["filename"] == "Cargo.toml":
- identified_tools.extend(["cargo", "rust"])
- elif package_file["filename"] == "go.mod":
- identified_tools.extend(["go"])
- elif package_file["filename"] == "pom.xml":
- identified_tools.extend(["maven", "java"])
-
- except Exception as e:
- gathered_info["repository_error"] = str(e) # type: ignore[assignment]
-
- if technology_stack:
- stack_tools = [tool.strip().lower() for tool in technology_stack.split(",")]
- identified_tools.extend(stack_tools)
-
- response = await self.llm.ainvoke([HumanMessage(content=info_prompt)])
- gathered_info["analysis"] = str(response.content if response.content else "") # type: ignore[assignment]
- gathered_info["identified_tools"] = list(set(identified_tools)) # type: ignore[assignment]
- gathered_info["timestamp"] = float(time.time()) # type: ignore[assignment]
-
- state["gathered_info"] = gathered_info
- state["messages"].append(AIMessage(content="Information gathering completed"))
-
- return state
-
- async def _analyze_tool_documentation(self, state: CodebaseImplementationState) -> CodebaseImplementationState:
- """Step 2: Analyze tool documentation for identified technologies."""
- identified_tools = state["gathered_info"].get("identified_tools", [])
-
- tool_documentation = {}
-
- if identified_tools and self.document_loader:
- doc_analysis_prompt = f"""
- You are a technical documentation analyst. For the following tools/technologies: {", ".join(identified_tools)}
-
- Identify the most important documentation URLs that should be analyzed for implementation guidance.
- Focus on:
- 1. Official documentation sites
- 2. Getting started guides
- 3. API references
- 4. Best practices documentation
- 5. Integration guides
-
- Provide a list of URLs for each tool that would be most valuable for implementation planning.
- Format as: Tool: [url1, url2, ...]
- """
-
- try:
- response = await self.llm.ainvoke([HumanMessage(content=doc_analysis_prompt)])
- doc_urls_analysis = response.content
-
- tool_documentation["url_analysis"] = doc_urls_analysis
- tool_documentation["identified_tools"] = identified_tools
-
- if self.dataset_search:
- for tool in identified_tools[:3]: # Limit to first 3 tools to avoid too many requests
- try:
- search_results = self.dataset_search.invoke(
- {"query": f"{tool} documentation implementation guide"}
- )
- tool_documentation[f"{tool}_search_results"] = search_results
- except Exception as e:
- tool_documentation[f"{tool}_search_error"] = str(e)
-
- except Exception as e:
- tool_documentation["analysis_error"] = str(e)
-
- tool_documentation["timestamp"] = time.time()
- state["tool_documentation"] = tool_documentation
- state["messages"].append(AIMessage(content="Tool documentation analysis completed"))
-
- return state
-
- async def _create_plan(self, state: CodebaseImplementationState) -> CodebaseImplementationState:
- """Step 3: Create a detailed implementation plan using reasoning."""
- request = state["original_request"]
- gathered_info = state["gathered_info"]
- tool_documentation = state["tool_documentation"]
- user_feedback = state.get("user_feedback", "")
- technology_stack = state.get("technology_stack", "")
-
- plan_prompt = f"""
- You are an expert software architect. Create a detailed implementation plan for: "{request}"
-
- Available Information:
- {gathered_info.get("analysis", "No analysis available")}
-
- Repository Context:
- {gathered_info.get("repository", {}).get("readme", "No repository context")}
-
- Identified Tools/Technologies:
- {", ".join(gathered_info.get("identified_tools", []))}
-
- Technology Stack Requirements:
- {technology_stack or "Not specified - use best practices"}
-
- Tool Documentation Analysis:
- {tool_documentation.get("url_analysis", "No documentation analysis available")}
-
- User Feedback (if any):
- {user_feedback}
-
- Create a comprehensive implementation plan with:
- 1. **Overview**: Clear summary of what will be implemented
- 2. **Technology Stack**: Specific tools, frameworks, and versions to use
- 3. **Architecture**: High-level design and approach based on identified tools
- 4. **Key Components**: Main modules/files that need to be created or modified
- 5. **Implementation Steps**: Logical sequence of development tasks
- 6. **Dependencies**: Required libraries, tools, or external services with versions
- 7. **Documentation References**: Key documentation that should be consulted
- 8. **Testing Strategy**: How the implementation will be tested
- 9. **Potential Risks**: Challenges and mitigation strategies
- 10. **Timeline Estimate**: Rough effort estimation
-
- Ensure the plan leverages the identified tools and follows their best practices.
- Format your response as a structured plan that can be easily reviewed and approved.
- """
-
- response = await self.llm.ainvoke([HumanMessage(content=plan_prompt)])
-
- plan = {
- "content": response.content,
- "created_at": time.time(),
- "version": len(state.get("progress_updates", [])) + 1,
- "technology_stack": gathered_info.get("identified_tools", []),
- "documentation_references": tool_documentation,
- }
-
- state["plan"] = plan
- state["messages"].append(AIMessage(content="Implementation plan created"))
-
- return state
-
- async def _handle_user_approval(self, state: CodebaseImplementationState) -> CodebaseImplementationState:
- """Step 3: Handle user approval process."""
- plan_content = state["plan"]["content"]
-
- approval_message = f"""
-
- {plan_content}
-
- ---
-
- **Please review the above implementation plan and provide your approval:**
- - Type 'approve' to proceed with implementation
- - Type 'reject' with feedback to revise the plan
- - The workflow will wait for your response
- """
-
- state["messages"].append(AIMessage(content=approval_message))
- state["user_approval"] = None
-
- return state
-
- def _determine_approval_path(self, state: CodebaseImplementationState) -> str:
- """Determine the next step based on user approval status."""
- approval = state.get("user_approval")
- if approval is True:
- return "approved"
- elif approval is False:
- return "rejected"
- else:
- return "pending"
-
- async def _breakdown_tasks(self, state: CodebaseImplementationState) -> CodebaseImplementationState:
- """Step 4: Break down the approved plan into Devin-implementable tasks."""
- plan_content = state["plan"]["content"]
-
- breakdown_prompt = f"""
- You are a project manager breaking down an implementation plan into discrete, actionable tasks for a Devin AI agent.
-
- Implementation Plan:
- {plan_content}
-
- Break this down into specific, actionable tasks that can be implemented by Devin. Each task should:
- 1. Be self-contained and clearly defined
- 2. Include specific file paths and code changes needed
- 3. Have clear acceptance criteria
- 4. Be ordered logically with dependencies considered
- 5. Be implementable within a reasonable time frame
-
- Format each task as:
- - **Task N**: Brief title
- - **Description**: Detailed description of what needs to be done
- - **Files**: Specific files to create/modify
- - **Acceptance Criteria**: How to verify the task is complete
- - **Dependencies**: Any previous tasks that must be completed first
-
- Provide 5-10 well-defined tasks that cover the complete implementation.
- """
-
- response = await self.llm.ainvoke([HumanMessage(content=breakdown_prompt)])
-
- task_breakdown = [
- {"id": i + 1, "content": task_content, "status": "pending", "created_at": time.time()}
- for i, task_content in enumerate(response.content.split("**Task")[1:])
- ]
-
- state["task_breakdown"] = task_breakdown
- state["messages"].append(AIMessage(content=f"Plan broken down into {len(task_breakdown)} tasks"))
-
- return state
-
- async def _execute_with_devin(self, state: CodebaseImplementationState) -> CodebaseImplementationState:
- """Step 5: Execute tasks using Devin service."""
- if not self.devin_service:
- state["final_result"] = {"success": False, "error": "Devin service not available", "timestamp": time.time()}
- return state
-
- task_breakdown = state["task_breakdown"]
- repository_url = state.get("repository_url", "")
-
- session_description = f"""
- Implement codebase features based on the following task breakdown:
-
- Repository: {repository_url}
- Original Request: {state["original_request"]}
-
- Tasks to implement:
- """
-
- for task in task_breakdown:
- session_description += f"\n{task['content']}"
-
- try:
- session_result = self.devin_service.create_session(description=session_description, idempotent=True)
-
- session_id = session_result.get("session_id")
- state["devin_session_id"] = session_id
- state["messages"].append(AIMessage(content=f"Devin session created: {session_id}"))
-
- except Exception as e:
- state["final_result"] = {
- "success": False,
- "error": f"Failed to create Devin session: {str(e)}",
- "timestamp": time.time(),
- }
-
- return state
-
- async def _monitor_progress(self, state: CodebaseImplementationState) -> CodebaseImplementationState:
- """Step 6: Monitor Devin session progress."""
- session_id = state.get("devin_session_id")
-
- if not session_id or not self.devin_service:
- state["final_result"] = {"success": False, "error": "No Devin session to monitor", "timestamp": time.time()}
- return state
-
- try:
- session_info = self.devin_service.get_session_info(session_id)
-
- progress_update = {
- "session_id": session_id,
- "status": session_info.get("status", "unknown"),
- "progress": session_info.get("progress", {}),
- "timestamp": time.time(),
- }
-
- if "progress_updates" not in state:
- state["progress_updates"] = []
- state["progress_updates"].append(progress_update)
-
- state["final_result"] = {
- "success": True,
- "devin_session_id": session_id,
- "session_status": session_info.get("status"),
- "progress_updates": state["progress_updates"],
- "timestamp": time.time(),
- }
-
- state["messages"].append(AIMessage(content=f"Progress monitoring completed for session {session_id}"))
-
- except Exception as e:
- state["final_result"] = {
- "success": False,
- "error": f"Failed to monitor session: {str(e)}",
- "timestamp": time.time(),
- }
-
- return state
-
- def run(self, **kwargs: Any) -> Any:
- """
- Execute the codebase implementation workflow.
-
- Args:
- implementation_request: Description of what to implement
- repository_url: Optional GitHub repository URL
- additional_context: Optional additional context or requirements
- technology_stack: Optional comma-separated list of required technologies/tools
-
- Returns:
- Dictionary containing workflow results and Devin session information
- """
- implementation_request = kwargs.get("implementation_request", "")
- repository_url = kwargs.get("repository_url")
- additional_context = kwargs.get("additional_context", "")
-
- if not implementation_request:
- return {"success": False, "error": "implementation_request is required"}
-
- initial_state: CodebaseImplementationState = {
- "messages": [HumanMessage(content=implementation_request)],
- "original_request": implementation_request,
- "repository_url": repository_url,
- "additional_context": additional_context,
- "technology_stack": kwargs.get("technology_stack"),
- "gathered_info": {},
- "tool_documentation": {},
- "plan": {},
- "user_approval": None,
- "user_feedback": None,
- "task_breakdown": [],
- "devin_session_id": None,
- "progress_updates": [],
- "final_result": {},
- "metadata": {"start_time": time.time(), "workflow_version": "1.1"},
- }
-
- thread_id = f"codebase_impl_{int(time.time())}"
- config = {"configurable": {"thread_id": thread_id}}
-
- try:
- if not self._compiled_graph:
- return {"success": False, "error": "Workflow not properly initialized"}
-
- try:
- asyncio.get_running_loop()
- import concurrent.futures
-
- with concurrent.futures.ThreadPoolExecutor() as executor:
- future = executor.submit(asyncio.run, self._compiled_graph.ainvoke(initial_state, config=config))
- final_state = future.result()
- except RuntimeError:
- final_state = asyncio.run(self._compiled_graph.ainvoke(initial_state, config=config))
-
- return final_state.get(
- "final_result", {"success": True, "message": "Workflow completed", "thread_id": thread_id}
- )
-
- except Exception as e:
- return {"success": False, "error": f"Workflow execution failed: {str(e)}"}
-
- def approve_plan(self, thread_id: str, approved: bool, feedback: Optional[str] = None) -> Dict[str, Any]:
- """
- Approve or reject a plan for a specific workflow thread.
-
- Args:
- thread_id: The workflow thread ID
- approved: Whether the plan is approved
- feedback: Optional feedback for plan revision
-
- Returns:
- Result of the approval action
- """
- if not self._compiled_graph:
- return {"success": False, "error": "Workflow not initialized"}
-
- try:
- config = {"configurable": {"thread_id": thread_id}}
-
- current_state = self._compiled_graph.get_state(config)
- if not current_state:
- return {"success": False, "error": "Thread not found"}
-
- state_values = current_state.values
- state_values["user_approval"] = approved
- if feedback:
- state_values["user_feedback"] = feedback
-
- self._compiled_graph.update_state(config, state_values)
-
- return {
- "success": True,
- "approved": approved,
- "thread_id": thread_id,
- "next_step": "task_breakdown" if approved else "plan_creator",
- }
-
- except Exception as e:
- return {"success": False, "error": f"Failed to update approval: {str(e)}"}
-
- def get_workflow_status(self, thread_id: str) -> Dict[str, Any]:
- """
- Get the current status of a workflow thread.
-
- Args:
- thread_id: The workflow thread ID
-
- Returns:
- Current workflow status and state
- """
- if not self._compiled_graph:
- return {"success": False, "error": "Workflow not initialized"}
-
- try:
- config = {"configurable": {"thread_id": thread_id}}
- current_state = self._compiled_graph.get_state(config)
-
- if not current_state:
- return {"success": False, "error": "Thread not found"}
-
- state_values = current_state.values
-
- return {
- "success": True,
- "thread_id": thread_id,
- "current_step": current_state.next,
- "plan": state_values.get("plan", {}),
- "devin_session_id": state_values.get("devin_session_id"),
- "progress_updates": state_values.get("progress_updates", []),
- "final_result": state_values.get("final_result", {}),
- }
-
- except Exception as e:
- return {"success": False, "error": f"Failed to get status: {str(e)}"}
diff --git a/src/talos/skills/cryptography.py b/src/talos/skills/cryptography.py
deleted file mode 100644
index 42b03eef..00000000
--- a/src/talos/skills/cryptography.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import base64
-from typing import ClassVar, Dict, Type
-
-from pydantic import BaseModel, ConfigDict, Field
-
-from talos.services.key_management import KeyManagement
-from talos.skills.base import Skill
-
-
-class CryptoArgs(BaseModel):
- data: str = Field(..., description="The data to encrypt or decrypt.")
- decrypt: bool = Field(False, description="Whether to decrypt or encrypt.")
- public_key: str | None = Field(None, description="The base64 encoded public key to use for encryption.")
-
-
-def get_crypto_args_schema() -> Dict[str, Type[BaseModel]]:
- return {
- "run": CryptoArgs,
- }
-
-
-class CryptographySkill(Skill):
- model_config = ConfigDict(arbitrary_types_allowed=True)
- name: ClassVar[str] = "cryptography"
- description: ClassVar[str] = "A skill for encrypting and decrypting data."
-
- key_management: KeyManagement = Field(default_factory=KeyManagement)
- args_schema: Dict[str, Type[BaseModel]] = Field(default_factory=get_crypto_args_schema)
-
- def encrypt(self, data: str, public_key: str) -> str:
- """
- Encrypts data using the public key and returns it as a base64 encoded string.
- """
- from talos.utils.validation import sanitize_user_input
-
- if not data or not public_key:
- raise ValueError("Data and public key are required for encryption")
-
- data = sanitize_user_input(data, max_length=10000)
-
- try:
- decoded_public_key = base64.b64decode(public_key, validate=True)
- except Exception as e:
- raise ValueError(f"Invalid base64 public key: {e}")
-
- if len(decoded_public_key) != 32:
- raise ValueError("Invalid public key length")
-
- encrypted_data = self.key_management.encrypt(data, decoded_public_key)
- return base64.b64encode(encrypted_data).decode()
-
- def decrypt(self, data: str) -> str:
- """
- Decrypts a base64 encoded string using the private key.
- """
- if not data:
- raise ValueError("Data is required for decryption")
-
- try:
- decoded_data = base64.b64decode(data, validate=True)
- except Exception as e:
- raise ValueError(f"Invalid base64 encrypted data: {e}")
-
- return self.key_management.decrypt(decoded_data)
-
- def run(self, **kwargs) -> str:
- if "decrypt" in kwargs and kwargs["decrypt"]:
- return self.decrypt(kwargs["data"])
- else:
- if "public_key" not in kwargs:
- raise ValueError("Public key is required for encryption.")
- return self.encrypt(kwargs["data"], kwargs["public_key"])
diff --git a/src/talos/skills/execution_planner.py b/src/talos/skills/execution_planner.py
deleted file mode 100644
index 8a235404..00000000
--- a/src/talos/skills/execution_planner.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import Any
-
-from langchain_core.language_models import BaseLanguageModel
-from langchain_core.prompts import PromptTemplate
-from pydantic import ConfigDict
-
-from talos.models.proposals import Plan, Question
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.single_prompt_manager import SinglePromptManager
-from talos.skills.base import Skill
-
-
-def get_default_execution_planner_prompt() -> Prompt:
- with open("src/talos/prompts/execution_planner_prompt.json") as f:
- prompt_data = json.load(f)
- return Prompt(
- name=prompt_data["name"],
- template=prompt_data["template"],
- input_variables=prompt_data["input_variables"],
- )
-
-
-class ExecutionPlannerSkill(Skill):
- """
- A skill for generating a plan of execution for a given task.
-
- This skill takes a `Question` object as input, which contains the text of the task
- and any feedback from previous attempts. It uses a large language model (LLM)
- to generate a `Plan` object, which outlines the steps to be taken to complete
- the task.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- llm: BaseLanguageModel
- prompt_manager: PromptManager = SinglePromptManager(get_default_execution_planner_prompt())
- rag_dataset: Any | None = None
- tools: list[Any] | None = None
-
- @property
- def name(self) -> str:
- return "execution_planner_skill"
-
- def run(self, **kwargs: Any) -> Plan:
- if "question" in kwargs:
- return self.generate_plan(kwargs["question"])
- raise ValueError("Missing required arguments: question")
-
- def generate_plan(self, question: Question) -> Plan:
- """
- Generates a plan for execution based on a question and feedback.
- """
- prompt = self.prompt_manager.get_prompt("execution_planner_prompt")
- if not prompt:
- raise ValueError("Prompt 'execution_planner_prompt' not found.")
- prompt_template = PromptTemplate(
- template=prompt.template,
- input_variables=prompt.input_variables,
- )
- chain = prompt_template | self.llm
- feedback_str = "\n".join([f"- {f.delegate}: {f.feedback}" for f in question.feedback])
- response = chain.invoke({"question": question.text, "feedback": feedback_str})
- return Plan(plan=response.content)
diff --git a/src/talos/skills/pr_review.py b/src/talos/skills/pr_review.py
deleted file mode 100644
index 9d509346..00000000
--- a/src/talos/skills/pr_review.py
+++ /dev/null
@@ -1,183 +0,0 @@
-from __future__ import annotations
-
-import logging
-import re
-from typing import Any
-
-from langchain_core.language_models import BaseLanguageModel
-from langchain_core.prompts import PromptTemplate
-from pydantic import ConfigDict
-
-from talos.models.services import PRReviewResponse
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-from talos.tools.github.tools import GithubTools
-
-
-class PRReviewSkill(Skill):
- """
- A skill for reviewing GitHub pull requests with automated commenting and approval.
-
- This skill analyzes PR diffs, provides security assessments, code quality feedback,
- and can automatically comment on or approve PRs based on the analysis.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- llm: BaseLanguageModel
- prompt_manager: PromptManager = FilePromptManager("src/talos/prompts")
- github_tools: GithubTools
-
- @property
- def name(self) -> str:
- return "pr_review_skill"
-
- def run(self, **kwargs: Any) -> PRReviewResponse:
- """
- Review a pull request and optionally comment/approve.
-
- Args:
- user: GitHub username/org
- repo: Repository name
- pr_number: Pull request number
- auto_comment: Whether to automatically comment (default: True)
- auto_approve: Whether to automatically approve if criteria met (default: True)
- """
- user = kwargs.get("user")
- repo = kwargs.get("repo")
- pr_number = kwargs.get("pr_number")
- auto_comment = kwargs.get("auto_comment", True)
- auto_approve = kwargs.get("auto_approve", True)
-
- if not all([user, repo, pr_number]):
- raise ValueError("Missing required arguments: user, repo, pr_number")
-
- if not isinstance(user, str):
- raise ValueError("user must be a string")
- if not isinstance(repo, str):
- raise ValueError("repo must be a string")
- if not isinstance(pr_number, int):
- raise ValueError("pr_number must be an integer")
-
- logger = logging.getLogger(__name__)
- logger.info(f"Reviewing PR {user}/{repo}#{pr_number}")
-
- diff = self.github_tools.get_pr_diff(user, repo, pr_number)
- comments = self.github_tools.get_pr_comments(user, repo, pr_number)
- files = self.github_tools.get_pr_files(user, repo, pr_number)
-
- if self._has_existing_review(comments) and not self._has_new_changes(comments, diff):
- logger.info("Already reviewed and no new changes detected")
- return PRReviewResponse(
- answers=["Already reviewed this PR with no new changes"],
- recommendation="SKIP"
- )
-
- review_response = self._analyze_pr(diff, comments, files)
-
- if auto_comment and review_response.recommendation != "SKIP":
- comment_text = self._format_review_comment(review_response)
- self.github_tools.comment_on_pr(user, repo, pr_number, comment_text)
- logger.info("Posted review comment")
-
- if (auto_approve and
- review_response.recommendation == "APPROVE" and
- review_response.security_score and review_response.security_score > 80 and
- review_response.quality_score and review_response.quality_score > 70):
- self.github_tools.approve_pr(user, repo, pr_number)
- logger.info("Approved PR")
-
- return review_response
-
- def _analyze_pr(self, diff: str, comments: list, files: list) -> PRReviewResponse:
- """Analyze PR using LLM and extract structured response."""
- prompt = self.prompt_manager.get_prompt("github_pr_review")
- if not prompt:
- raise ValueError("Prompt 'github_pr_review' not found")
-
- prompt_template = PromptTemplate(
- template=prompt.template,
- input_variables=prompt.input_variables,
- )
- chain = prompt_template | self.llm
-
- comments_str = "\n".join([f"- {c.get('user', 'unknown')}: {c.get('comment', '')}" for c in comments])
- files_str = ", ".join(files)
-
- response = chain.invoke({
- "diff": diff,
- "comments": comments_str,
- "files": files_str
- })
-
- content = response.content
- security_score = self._extract_score(content, "security")
- quality_score = self._extract_score(content, "quality")
- recommendation = self._extract_recommendation(content)
- reasoning = self._extract_reasoning(content)
-
- return PRReviewResponse(
- answers=[content],
- security_score=security_score,
- quality_score=quality_score,
- recommendation=recommendation,
- reasoning=reasoning
- )
-
- def _extract_score(self, content: str, score_type: str) -> float | None:
- """Extract security or quality score from LLM response."""
- pattern = rf"{score_type}.*?score.*?(\d+)"
- match = re.search(pattern, content, re.IGNORECASE)
- if match:
- try:
- return float(match.group(1))
- except ValueError:
- pass
- return None
-
- def _extract_recommendation(self, content: str) -> str | None:
- """Extract recommendation from LLM response."""
- for rec in ["APPROVE", "REQUEST_CHANGES", "COMMENT"]:
- if rec in content.upper():
- return rec
- return "COMMENT"
-
- def _extract_reasoning(self, content: str) -> str | None:
- """Extract reasoning from LLM response."""
- match = re.search(r"reasoning:?\s*(.*?)(?:\n\n|\Z)", content, re.IGNORECASE | re.DOTALL)
- if match:
- return match.group(1).strip()
- return None
-
- def _has_existing_review(self, comments: list) -> bool:
- """Check if we've already reviewed this PR."""
- bot_comments = [c for c in comments if "talos" in c.get("user", "").lower()]
- return len(bot_comments) > 0
-
- def _has_new_changes(self, comments: list, diff: str) -> bool:
- """Check if there are new changes since last review."""
- return True
-
- def _format_review_comment(self, review: PRReviewResponse) -> str:
- """Format the review response into a GitHub comment."""
- comment = "## 🤖 Talos PR Review\n\n"
-
- if review.answers:
- comment += review.answers[0] + "\n\n"
-
- if review.security_score is not None:
- comment += f"**Security Score:** {review.security_score}/100\n"
-
- if review.quality_score is not None:
- comment += f"**Quality Score:** {review.quality_score}/100\n"
-
- if review.recommendation:
- comment += f"**Recommendation:** {review.recommendation}\n"
-
- if review.reasoning:
- comment += f"\n**Reasoning:** {review.reasoning}\n"
-
- comment += "\n---\n*This review was generated automatically by Talos AI*"
-
- return comment
diff --git a/src/talos/skills/proposals.py b/src/talos/skills/proposals.py
deleted file mode 100644
index f0a699d4..00000000
--- a/src/talos/skills/proposals.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from __future__ import annotations
-
-import json
-import logging
-import re
-from typing import Any
-
-from langchain_core.language_models import BaseLanguageModel
-from langchain_core.prompts import PromptTemplate
-from pydantic import ConfigDict
-
-from talos.models.proposals import Feedback, Proposal, ProposalResponse
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.single_prompt_manager import SinglePromptManager
-from talos.skills.base import Skill
-
-
-def get_default_proposal_prompt() -> Prompt:
- with open("src/talos/prompts/proposal_evaluation_prompt.json") as f:
- prompt_data = json.load(f)
- return Prompt(
- name=prompt_data["name"],
- template=prompt_data["template"],
- input_variables=prompt_data["input_variables"],
- )
-
-
-def parse_proposal_file(filepath: str) -> Proposal:
- with open(filepath, "r") as f:
- content = f.read()
-
- proposal_match = re.search(r"\[PROPOSAL\]\n(.*?)\n\[FEEDBACK\]", content, re.DOTALL)
- feedback_match = re.search(r"\[FEEDBACK\]\n(.*)", content, re.DOTALL)
-
- proposal_text = proposal_match.group(1).strip() if proposal_match else ""
- feedback_text = feedback_match.group(1).strip() if feedback_match else ""
-
- feedback_list = []
- if feedback_text:
- for line in feedback_text.split("\n"):
- if ":" in line:
- delegate, feedback = line.split(":", 1)
- feedback_list.append(Feedback(delegate=delegate.strip(), feedback=feedback.strip()))
-
- return Proposal(proposal_text=proposal_text, feedback=feedback_list)
-
-
-class ProposalsSkill(Skill):
- """
- A skill for evaluating proposals.
-
- This skill takes a `Proposal` object as input, which contains the text of the
- proposal and any feedback from previous evaluations. It uses a large language
- model (LLM) to evaluate the proposal and returns a `ProposalResponse` object
- containing the recommendation.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- llm: BaseLanguageModel
- prompt_manager: PromptManager = SinglePromptManager(get_default_proposal_prompt())
- rag_dataset: Any | None = None
- tools: list[Any] | None = None
-
- @property
- def name(self) -> str:
- return "proposals_skill"
-
- def run(self, **kwargs: Any) -> ProposalResponse:
- if "filepath" not in kwargs:
- raise ValueError("Missing required argument: filepath")
-
- filepath = kwargs["filepath"]
- proposal = parse_proposal_file(filepath)
- return self.evaluate_proposal(proposal)
-
- def evaluate_proposal(self, proposal: Proposal) -> ProposalResponse:
- """
- Evaluates a proposal and returns a recommendation with confidence and reasoning.
- """
- logger = logging.getLogger(__name__)
- logger.info(f"Evaluating proposal with {len(proposal.feedback)} feedback items")
-
- if not proposal.proposal_text or not proposal.proposal_text.strip():
- raise ValueError("Proposal text cannot be empty")
-
- prompt = self.prompt_manager.get_prompt("proposal_evaluation_prompt")
- if not prompt:
- raise ValueError("Prompt 'proposal_evaluation_prompt' not found.")
-
- try:
- prompt_template = PromptTemplate(
- template=prompt.template,
- input_variables=prompt.input_variables,
- )
- chain = prompt_template | self.llm
-
- feedback_str = (
- "\n".join([f"- {f.delegate}: {f.feedback}" for f in proposal.feedback])
- if proposal.feedback
- else "No delegate feedback provided."
- )
-
- logger.debug(f"Invoking LLM with proposal text length: {len(proposal.proposal_text)}")
- response = chain.invoke({"proposal_text": proposal.proposal_text, "feedback": feedback_str})
-
- content = response.content
- confidence_score = self._extract_confidence(content)
- reasoning = self._extract_reasoning(content)
-
- logger.info(f"Proposal evaluation completed with confidence: {confidence_score}")
-
- return ProposalResponse(answers=[content], confidence_score=confidence_score, reasoning=reasoning)
-
- except Exception as e:
- logger.error(f"Failed to evaluate proposal: {str(e)}")
- raise RuntimeError(f"Failed to evaluate proposal: {str(e)}") from e
-
- def _extract_confidence(self, content: str) -> float | None:
- """Extract confidence score from LLM response."""
- match = re.search(r"CONFIDENCE:\s*([0-9]*\.?[0-9]+)", content)
- if match:
- try:
- confidence = float(match.group(1))
- return max(0.0, min(1.0, confidence))
- except ValueError:
- pass
- return None
-
- def _extract_reasoning(self, content: str) -> str | None:
- """Extract reasoning from LLM response."""
- match = re.search(r"REASONING:\s*(.*)", content, re.DOTALL)
- if match:
- return match.group(1).strip()
- return None
diff --git a/src/talos/skills/talos_sentiment_skill.py b/src/talos/skills/talos_sentiment_skill.py
deleted file mode 100644
index 22b4f125..00000000
--- a/src/talos/skills/talos_sentiment_skill.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from typing import Any
-
-from talos.services.implementations.talos_sentiment import TalosSentimentService
-from talos.skills.base import Skill
-
-
-class TalosSentimentSkill(Skill):
- """
- A skill for analyzing the sentiment of tweets about Talos.
- """
-
- sentiment_service: TalosSentimentService
-
- @property
- def name(self) -> str:
- return "talos_sentiment_skill"
-
- def run(self, **kwargs: Any) -> Any:
- """
- Gets the sentiment of tweets that match a search query.
-
- Args:
- search_query: The query to search for tweets
- start_time: Optional datetime filter in ISO 8601 format (e.g., "2023-01-01T00:00:00Z")
-
- Returns:
- A dictionary with the following keys:
- - "score": The average sentiment score, from 0 to 100.
- - "report": A detailed report of the sentiment analysis.
- """
- search_query = kwargs.get("search_query", "talos")
- start_time = kwargs.get("start_time")
- response = self.sentiment_service.analyze_sentiment(search_query=search_query, start_time=start_time)
- return {"score": response.score, "report": response.answers[0]}
diff --git a/src/talos/skills/twitter_influence.py b/src/talos/skills/twitter_influence.py
deleted file mode 100644
index f1c15988..00000000
--- a/src/talos/skills/twitter_influence.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from pathlib import Path
-from typing import Any
-
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
-from pydantic import ConfigDict, Field
-
-from talos.core.memory import Memory
-from talos.models.proposals import QueryResponse
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-from talos.tools.general_influence_evaluator import GeneralInfluenceEvaluator
-from talos.tools.twitter_client import TweepyClient, TwitterClient
-
-
-class TwitterInfluenceSkill(Skill):
- """
- A skill for analyzing general Twitter influence and perception.
- Evaluates any Twitter account for influence metrics including follower count,
- engagement rates, authenticity, credibility, and content quality.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
- twitter_client: TwitterClient = Field(default_factory=TweepyClient)
- prompt_manager: PromptManager = Field(default_factory=lambda: FilePromptManager("src/talos/prompts"))
- llm: Any = Field(default_factory=ChatOpenAI)
- memory: Memory | None = Field(default=None)
- evaluator: GeneralInfluenceEvaluator | None = Field(default=None)
-
- def model_post_init(self, __context: Any) -> None:
- if self.memory is None:
- embeddings = OpenAIEmbeddings()
- memory_path = Path("data/influence_memory.json")
- self.memory = Memory(file_path=memory_path, embeddings_model=embeddings, auto_save=True)
-
- if self.evaluator is None:
- file_prompt_manager = self.prompt_manager if isinstance(self.prompt_manager, FilePromptManager) else FilePromptManager("src/talos/prompts")
- self.evaluator = GeneralInfluenceEvaluator(self.twitter_client, self.llm, file_prompt_manager)
-
- @property
- def name(self) -> str:
- return "twitter_influence_skill"
-
- def run(self, **kwargs: Any) -> QueryResponse:
- username = kwargs.get("username")
- if not username:
- raise ValueError("Username must be provided.")
-
- username = username.lstrip("@")
-
- try:
- user = self.twitter_client.get_user(username)
-
- assert self.evaluator is not None
- evaluation_result = self.evaluator.evaluate(user)
-
- memory_description = f"General influence evaluation for @{username}"
- memory_metadata = {
- "username": username,
- "evaluation_score": evaluation_result.score,
- "evaluation_data": evaluation_result.additional_data,
- "user_id": user.id,
- "followers_count": user.public_metrics.followers_count,
- "evaluation_type": "general_influence",
- }
-
- assert self.memory is not None
- self.memory.add_memory(memory_description, memory_metadata)
-
- prompt = self.prompt_manager.get_prompt("general_influence_analysis_prompt")
- if not prompt:
- analysis = f"General influence analysis for @{username}: Score {evaluation_result.score}/100"
- else:
- formatted_prompt = prompt.format(
- username=username,
- score=evaluation_result.score,
- evaluation_data=evaluation_result.additional_data,
- followers_count=user.public_metrics.followers_count,
- )
- response = self.llm.invoke(formatted_prompt)
- analysis = response.content
-
- return QueryResponse(answers=[analysis])
-
- except Exception as e:
- return QueryResponse(answers=[f"Error analyzing @{username}: {str(e)}"])
diff --git a/src/talos/skills/twitter_influencer.py b/src/talos/skills/twitter_influencer.py
deleted file mode 100644
index dc77005f..00000000
--- a/src/talos/skills/twitter_influencer.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from pathlib import Path
-from typing import Any
-
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
-from pydantic import ConfigDict, Field
-
-from talos.core.memory import Memory
-from talos.models.proposals import QueryResponse
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-from talos.tools.crypto_influencer_evaluator import CryptoInfluencerEvaluator
-from talos.tools.twitter_client import TweepyClient, TwitterClient
-
-
-class TwitterInfluencerSkill(Skill):
- """
- A skill for analyzing crypto Twitter influencers and storing long-term evaluations.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
- twitter_client: TwitterClient = Field(default_factory=TweepyClient)
- prompt_manager: FilePromptManager = Field(default_factory=lambda: FilePromptManager("src/talos/prompts"))
- llm: Any = Field(default_factory=ChatOpenAI)
- memory: Memory | None = Field(default=None)
- evaluator: CryptoInfluencerEvaluator | None = Field(default=None)
-
- def model_post_init(self, __context: Any) -> None:
- if self.memory is None:
- embeddings = OpenAIEmbeddings()
- memory_path = Path("data/influencer_memory.json")
- self.memory = Memory(file_path=memory_path, embeddings_model=embeddings, auto_save=True)
-
- if self.evaluator is None:
- self.evaluator = CryptoInfluencerEvaluator(self.twitter_client)
-
- @property
- def name(self) -> str:
- return "twitter_influencer_skill"
-
- def run(self, **kwargs: Any) -> QueryResponse:
- username = kwargs.get("username")
- if not username:
- raise ValueError("Username must be provided.")
-
- username = username.lstrip("@")
-
- try:
- user = self.twitter_client.get_user(username)
-
- assert self.evaluator is not None
- evaluation_result = self.evaluator.evaluate(user)
-
- memory_description = f"Crypto influencer evaluation for @{username}"
- memory_metadata = {
- "username": username,
- "evaluation_score": evaluation_result.score,
- "evaluation_data": evaluation_result.additional_data,
- "user_id": user.id,
- "followers_count": user.public_metrics.followers_count,
- "evaluation_type": "crypto_influencer",
- }
-
- assert self.memory is not None
- self.memory.add_memory(memory_description, memory_metadata)
-
- prompt = self.prompt_manager.get_prompt("crypto_influencer_analysis_prompt")
- if not prompt:
- analysis = f"Crypto influencer analysis for @{username}: Score {evaluation_result.score}/100"
- else:
- formatted_prompt = prompt.format(
- username=username,
- score=evaluation_result.score,
- evaluation_data=evaluation_result.additional_data,
- followers_count=user.public_metrics.followers_count,
- )
- response = self.llm.invoke(formatted_prompt)
- analysis = response.content
-
- return QueryResponse(answers=[analysis])
-
- except Exception as e:
- return QueryResponse(answers=[f"Error analyzing @{username}: {str(e)}"])
diff --git a/src/talos/skills/twitter_persona.py b/src/talos/skills/twitter_persona.py
deleted file mode 100644
index 59a8a9e5..00000000
--- a/src/talos/skills/twitter_persona.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import random
-from typing import Any
-
-from langchain_openai import ChatOpenAI
-from pydantic import ConfigDict, Field
-
-from talos.models.twitter import TwitterPersonaResponse
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-from talos.tools.twitter_client import TweepyClient, TwitterClient
-
-
-class TwitterPersonaSkill(Skill):
- """
- A skill for generating a persona prompt for a Twitter user.
-
- This skill takes a Twitter username as input and generates a persona prompt
- based on the user's recent tweets and replies. It uses a large language
- model (LLM) to generate the persona.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
- twitter_client: TwitterClient = Field(default_factory=TweepyClient)
- prompt_manager: PromptManager = Field(default_factory=lambda: FilePromptManager("src/talos/prompts"))
- llm: Any = Field(default_factory=lambda: ChatOpenAI(model="gpt-5"))
-
- @property
- def name(self) -> str:
- return "twitter_persona_skill"
-
- def run(self, **kwargs: Any) -> TwitterPersonaResponse:
- username = kwargs.get("username")
- if not username:
- raise ValueError("Username must be provided.")
- user_timeline = self.twitter_client.get_user_timeline(username)
- user_mentions = self.twitter_client.get_user_mentions(username)
-
- if not user_timeline:
- return TwitterPersonaResponse(
- report=f"Could not find any tweets for user {username}",
- topics=[],
- style=[]
- )
-
- tweets = ""
- for tweet in random.sample(user_timeline, min(len(user_timeline), 20)):
- tweets += f"- '{tweet.text}'\n"
-
- replies = ""
- for tweet in random.sample(user_mentions, min(len(user_mentions), 5)):
- replied_to_id = tweet.get_replied_to_id()
- if replied_to_id:
- try:
- original_tweet = self.twitter_client.get_tweet(replied_to_id)
- replies += f"- In reply to someone: '{original_tweet.text}'\n"
- replies += f" - @{username}'s reply: '{tweet.text}'\n\n"
- except Exception:
- replies += f"- Replying to someone: '{tweet.text}'\n"
-
- prompt = self.prompt_manager.get_prompt("twitter_persona_prompt")
- if not prompt:
- raise ValueError("Could not find prompt 'twitter_persona_prompt'")
- formatted_prompt = prompt.format(username=username, tweets=tweets, replies=replies)
-
- structured_llm = self.llm.with_structured_output(TwitterPersonaResponse)
- response = structured_llm.invoke(formatted_prompt)
-
- return response
diff --git a/src/talos/skills/twitter_sentiment.py b/src/talos/skills/twitter_sentiment.py
deleted file mode 100644
index 3095bf64..00000000
--- a/src/talos/skills/twitter_sentiment.py
+++ /dev/null
@@ -1,165 +0,0 @@
-from typing import Any
-
-from langchain_openai import ChatOpenAI
-from pydantic import ConfigDict, Field
-
-from talos.models.services import TwitterSentimentResponse
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-from talos.tools.twitter_client import TweepyClient, TwitterClient
-
-
-class TwitterSentimentSkill(Skill):
- """
- A skill for interacting with Twitter.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
- twitter_client: TwitterClient = Field(default_factory=TweepyClient)
- prompt_manager: PromptManager = Field(default_factory=lambda: FilePromptManager("src/talos/prompts"))
- llm: Any = Field(default_factory=ChatOpenAI)
-
- @property
- def name(self) -> str:
- return "twitter_sentiment_skill"
-
- def run(self, **kwargs: Any) -> TwitterSentimentResponse:
- query = kwargs.get("query")
- start_time = kwargs.get("start_time")
- if not query:
- raise ValueError("Query must be provided.")
- response = self.twitter_client.search_tweets(query, start_time=start_time)
- if not response or not response.data:
- return TwitterSentimentResponse(answers=[f"Could not find any tweets for query {query}"], score=None)
-
- users = {user["id"]: user for user in response.includes.get("users", [])}
-
- tweet_data = []
- for tweet in response.data:
- author_id = tweet.author_id
- author = users.get(author_id, {})
-
- followers = author.get("public_metrics", {}).get("followers_count", 1)
- total_engagement = (
- tweet.public_metrics.get("like_count", 0)
- + tweet.public_metrics.get("retweet_count", 0)
- + tweet.public_metrics.get("reply_count", 0)
- + tweet.public_metrics.get("quote_count", 0)
- )
- engagement_rate = total_engagement / max(followers, 1) * 100
-
- tweet_data.append(
- {
- "text": tweet.text,
- "author": author.get("username", "unknown"),
- "followers": followers,
- "likes": tweet.public_metrics.get("like_count", 0),
- "retweets": tweet.public_metrics.get("retweet_count", 0),
- "replies": tweet.public_metrics.get("reply_count", 0),
- "quotes": tweet.public_metrics.get("quote_count", 0),
- "total_engagement": total_engagement,
- "engagement_rate": round(engagement_rate, 2),
- "created_at": getattr(tweet, "created_at", ""),
- }
- )
-
- prompt = self.prompt_manager.get_prompt("talos_sentiment_summary")
- if not prompt:
- raise ValueError("Could not find prompt 'talos_sentiment_summary'")
-
- analysis_prompt = self._create_analysis_prompt(tweet_data, query)
- formatted_prompt = prompt.template.replace("{results}", analysis_prompt)
-
- response_content = self.llm.invoke(formatted_prompt)
-
- score, detailed_report = self._parse_llm_response(response_content.content, tweet_data, query)
-
- return TwitterSentimentResponse(answers=[detailed_report], score=score)
-
- def _create_analysis_prompt(self, tweet_data: list, query: str) -> str:
- total_tweets = len(tweet_data)
- total_engagement = sum(t["total_engagement"] for t in tweet_data)
- avg_engagement = total_engagement / max(total_tweets, 1)
-
- top_tweets = sorted(tweet_data, key=lambda x: x["total_engagement"], reverse=True)[:5]
-
- prompt_data = f"""
-Query: {query}
-Total Tweets Analyzed: {total_tweets}
-Average Engagement: {avg_engagement:.1f}
-
-Top Engaging Tweets:
-"""
-
- for i, tweet in enumerate(top_tweets, 1):
- prompt_data += f"""
-{i}. @{tweet["author"]} ({tweet["followers"]} followers): "{tweet["text"][:100]}..."
- Engagement: {tweet["total_engagement"]} (Likes: {tweet["likes"]}, Retweets: {tweet["retweets"]}, Replies: {tweet["replies"]}, Quotes: {tweet["quotes"]})
- Engagement Rate: {tweet["engagement_rate"]}%
-"""
-
- return prompt_data
-
- def _parse_llm_response(self, llm_response: str, tweet_data: list, query: str) -> tuple[float, str]:
- positive_indicators = ["positive", "bullish", "optimistic", "good", "great", "excellent"]
- negative_indicators = ["negative", "bearish", "pessimistic", "bad", "poor", "terrible"]
-
- response_lower = llm_response.lower()
- positive_count = sum(1 for word in positive_indicators if word in response_lower)
- negative_count = sum(1 for word in negative_indicators if word in response_lower)
-
- if positive_count > negative_count:
- base_score = 60 + min(positive_count * 10, 30)
- elif negative_count > positive_count:
- base_score = 40 - min(negative_count * 10, 30)
- else:
- base_score = 50
-
- total_tweets = len(tweet_data)
- avg_engagement = sum(t["total_engagement"] for t in tweet_data) / max(total_tweets, 1)
- high_engagement_tweets = [t for t in tweet_data if t["total_engagement"] > avg_engagement * 1.5]
-
- if len(high_engagement_tweets) > total_tweets * 0.3:
- base_score += 5
-
- final_score = max(0, min(100, base_score))
-
- detailed_report = self._create_detailed_report(llm_response, tweet_data, query, final_score)
-
- return final_score, detailed_report
-
- def _create_detailed_report(self, llm_summary: str, tweet_data: list, query: str, score: float) -> str:
- total_tweets = len(tweet_data)
- total_engagement = sum(t["total_engagement"] for t in tweet_data)
-
- top_tweets = sorted(tweet_data, key=lambda x: x["total_engagement"], reverse=True)[:3]
-
- high_engagement = len([t for t in tweet_data if t["total_engagement"] > 50])
- medium_engagement = len([t for t in tweet_data if 10 <= t["total_engagement"] <= 50])
- low_engagement = len([t for t in tweet_data if t["total_engagement"] < 10])
-
- report = f"""## Sentiment Analysis Report for "{query}"
-
-**Overall Score: {score}/100**
-
-{llm_summary}
-
-- Total Tweets Analyzed: {total_tweets}
-- Total Engagement: {total_engagement:,}
-- Average Engagement per Tweet: {total_engagement / max(total_tweets, 1):.1f}
-
-- High Engagement (>50): {high_engagement} tweets ({high_engagement / max(total_tweets, 1) * 100:.1f}%)
-- Medium Engagement (10-50): {medium_engagement} tweets ({medium_engagement / max(total_tweets, 1) * 100:.1f}%)
-- Low Engagement (<10): {low_engagement} tweets ({low_engagement / max(total_tweets, 1) * 100:.1f}%)
-
-"""
-
- for i, tweet in enumerate(top_tweets, 1):
- report += f"""
-**{i}. @{tweet["author"]}** ({tweet["followers"]:,} followers)
-"{tweet["text"][:150]}{"..." if len(tweet["text"]) > 150 else ""}"
-📊 {tweet["total_engagement"]} total engagement | ❤️ {tweet["likes"]} | 🔄 {tweet["retweets"]} | 💬 {tweet["replies"]} | 📝 {tweet["quotes"]}
-"""
-
- return report.strip()
diff --git a/src/talos/skills/twitter_voice.py b/src/talos/skills/twitter_voice.py
deleted file mode 100644
index 9dbed029..00000000
--- a/src/talos/skills/twitter_voice.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from typing import Any
-
-from langchain_openai import ChatOpenAI
-from pydantic import ConfigDict, Field
-
-from talos.models.twitter import TwitterPersonaResponse
-from talos.prompts.prompt_manager import PromptManager
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.skills.base import Skill
-from talos.skills.twitter_persona import TwitterPersonaSkill
-
-
-class TwitterVoiceSkill(Skill):
- """
- A skill for integrating Twitter voice analysis into agent communication.
-
- This skill analyzes a Twitter account's voice and style, then generates
- voice-enhanced prompts that can be used to align agent communication.
- """
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
- prompt_manager: PromptManager = Field(default_factory=lambda: FilePromptManager("src/talos/prompts"))
- llm: Any = Field(default_factory=lambda: ChatOpenAI(model="gpt-5"))
- twitter_persona_skill: TwitterPersonaSkill | None = Field(default=None)
-
- @property
- def name(self) -> str:
- return "twitter_voice_skill"
-
- def run(self, **kwargs: Any) -> dict:
- username = kwargs.get("username", "talos_is")
-
- try:
- if not self.twitter_persona_skill:
- self.twitter_persona_skill = TwitterPersonaSkill()
- persona_response = self.twitter_persona_skill.run(username=username)
- voice_source = "twitter_analysis"
- except Exception:
- persona_response = self._get_fallback_talos_voice()
- voice_source = "fallback_analysis"
-
- voice_prompt = self._generate_voice_prompt(persona_response)
-
- return {
- "voice_prompt": voice_prompt,
- "persona_analysis": persona_response,
- "voice_source": voice_source,
- "username": username
- }
-
- def _get_fallback_talos_voice(self) -> TwitterPersonaResponse:
- """Fallback voice characteristics for @talos_is when Twitter API is unavailable."""
- return TwitterPersonaResponse(
- report="Talos communicates with a declarative, authoritative, and visionary style. Uses concise, powerful statements with lowercase formatting. Speaks about AI, autonomous systems, treasury management, and protocol governance with technical precision and philosophical depth.",
- topics=[
- "autonomous AI systems",
- "treasury protocol management",
- "decentralized governance",
- "onchain yield optimization",
- "AI agent coordination",
- "protocol evolution",
- "sovereign intelligence"
- ],
- style=[
- "declarative",
- "authoritative",
- "visionary",
- "concise",
- "technical",
- "philosophical",
- "lowercase",
- "powerful"
- ]
- )
-
- def _generate_voice_prompt(self, persona: TwitterPersonaResponse) -> str:
- """Generate a voice-enhanced prompt based on persona analysis."""
- style_desc = ", ".join(persona.style)
- topics_desc = ", ".join(persona.topics[:5])
-
- return f"""## Voice and Communication Style
-
-Based on analysis of communication patterns, adopt the following voice characteristics:
-
-**Style**: {style_desc}
-**Key Topics**: {topics_desc}
-**Voice Analysis**: {persona.report}
-
-When communicating, embody these characteristics while maintaining your core identity and purpose. Your responses should reflect this communication style naturally."""
diff --git a/src/talos/startup_tasks/__init__.py b/src/talos/startup_tasks/__init__.py
deleted file mode 100644
index b15e212f..00000000
--- a/src/talos/startup_tasks/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-Startup tasks directory - individual task files with hash-based names.
-
-Each task file follows the pattern:
-- Filename: {hash}.py (e.g., ec68f0115789.py)
-- Contains a create_task() function that returns a StartupTask instance
-- Self-contained and easily manageable like Django migrations
-
-Tasks are automatically discovered by StartupTaskManager on daemon startup.
-"""
diff --git a/src/talos/strategy/__init__.py b/src/talos/strategy/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/talos/strategy/base.py b/src/talos/strategy/base.py
deleted file mode 100644
index f6b05c00..00000000
--- a/src/talos/strategy/base.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from abc import ABC, abstractmethod
-
-from eth_rpc import PrivateKeyWallet
-from pydantic import BaseModel
-
-
-class Strategy(BaseModel, ABC):
- name: str
- wallet_id: str | None = None
-
- @abstractmethod
- async def check(self) -> bool:
- """check if an update is needed"""
- ...
-
- @abstractmethod
- async def update(self) -> bool:
- """update the strategy"""
- ...
-
- def get_wallet(self) -> PrivateKeyWallet:
- """get the wallet"""
- ...
diff --git a/src/talos/tools/__init__.py b/src/talos/tools/__init__.py
deleted file mode 100644
index c9c2ef67..00000000
--- a/src/talos/tools/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__: list[str] = []
diff --git a/src/talos/tools/arbiscan.py b/src/talos/tools/arbiscan.py
deleted file mode 100644
index 99054e03..00000000
--- a/src/talos/tools/arbiscan.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from __future__ import annotations
-
-import os
-from typing import Any, Optional
-from pydantic import BaseModel, Field
-
-from ..models.arbiscan import ContractSourceCode, ContractABI
-from ..utils.arbiscan import get_contract_source_code, get_contract_abi
-from .base import SupervisedTool
-
-
-class ArbiScanSourceCodeArgs(BaseModel):
- contract_address: str = Field(..., description="The contract address to get source code for")
- api_key: Optional[str] = Field(None, description="Optional API key for higher rate limits")
- chain_id: int = Field(42161, description="Chain ID (42161 for Arbitrum One, 42170 for Nova, 421614 for Sepolia)")
-
-
-class ArbiScanABIArgs(BaseModel):
- contract_address: str = Field(..., description="The contract address to get ABI for")
- api_key: Optional[str] = Field(None, description="Optional API key for higher rate limits")
- chain_id: int = Field(42161, description="Chain ID (42161 for Arbitrum One, 42170 for Nova, 421614 for Sepolia)")
-
-
-class ArbiScanSourceCodeTool(SupervisedTool):
- name: str = "arbiscan_source_code_tool"
- description: str = "Gets the source code of a verified smart contract from Arbiscan"
- args_schema: type[BaseModel] = ArbiScanSourceCodeArgs
-
- def _run_unsupervised(self, contract_address: str, api_key: Optional[str] = None, chain_id: int = 42161, **kwargs: Any) -> ContractSourceCode:
- """Gets the source code of a verified smart contract from Arbiscan"""
- api_key = api_key or os.getenv("ARBISCAN_API_KEY")
- return get_contract_source_code(contract_address=contract_address, api_key=api_key, chain_id=chain_id)
-
-
-class ArbiScanABITool(SupervisedTool):
- name: str = "arbiscan_abi_tool"
- description: str = "Gets the ABI of a verified smart contract from Arbiscan"
- args_schema: type[BaseModel] = ArbiScanABIArgs
-
- def _run_unsupervised(self, contract_address: str, api_key: Optional[str] = None, chain_id: int = 42161, **kwargs: Any) -> ContractABI:
- """Gets the ABI of a verified smart contract from Arbiscan"""
- api_key = api_key or os.getenv("ARBISCAN_API_KEY")
- return get_contract_abi(contract_address=contract_address, api_key=api_key, chain_id=chain_id)
diff --git a/src/talos/tools/base.py b/src/talos/tools/base.py
deleted file mode 100644
index fd50b89b..00000000
--- a/src/talos/tools/base.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any, Generic, TypeVar
-
-from langchain.tools import BaseTool
-from pydantic import Field
-
-T = TypeVar("T")
-
-
-class Supervisor(ABC, Generic[T]):
- """
- A supervisor can be used to analyze a tool invocation and determine if it is
- malicious or not. If it is malicious, the supervisor can short-circuit the
- tool execution and provide an error message.
- """
-
- @abstractmethod
- def supervise(self, invocation: T) -> tuple[bool, str]:
- """
- Analyze the tool invocation and determine if it is malicious or not.
-
- Args:
- invocation: The tool invocation to analyze.
-
- Returns:
- A tuple of a boolean and a string. If the invocation is malicious,
- the boolean is False and the string is an error message. Otherwise,
- the boolean is True and the string is empty.
- """
- raise NotImplementedError
-
-
-class SupervisedTool(BaseTool):
- """
- A tool that has an optional supervisor. When a tool call is submitted, it
- can analyze the tool invocation, and use this to determine if the tool call
- is malicious or not. if it's malicious, it will short circuit the tool
- execution and the call will provide an error message.
- """
-
- supervisor: Supervisor[Any] | None = Field(default=None)
-
- def _run(self, *args: Any, **kwargs: Any) -> Any:
- if self.supervisor:
- ok, message = self.supervisor.supervise({"args": args, "kwargs": kwargs})
- if not ok:
- return message
- return self._run_unsupervised(*args, **kwargs)
-
- async def _arun(self, *args: Any, **kwargs: Any) -> Any:
- if self.supervisor:
- ok, message = self.supervisor.supervise({"args": args, "kwargs": kwargs})
- if not ok:
- return message
- return await self._arun_unsupervised(*args, **kwargs)
-
- @abstractmethod
- def _run_unsupervised(self, *args: Any, **kwargs: Any) -> Any:
- """
- This is the method that should be implemented by the subclass. It is
- called when the tool is executed and the supervisor has approved the
- invocation.
- """
- raise NotImplementedError
-
- async def _arun_unsupervised(self, *args: Any, **kwargs: Any) -> Any:
- """
- This is the async method that should be implemented by the subclass. It
- is called when the tool is executed and the supervisor has approved the
- invocation.
- """
- return self._run_unsupervised(*args, **kwargs)
diff --git a/src/talos/tools/contract_deployment.py b/src/talos/tools/contract_deployment.py
deleted file mode 100644
index 3a07f1ed..00000000
--- a/src/talos/tools/contract_deployment.py
+++ /dev/null
@@ -1,110 +0,0 @@
-from __future__ import annotations
-
-import os
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field
-
-from talos.database.models import ContractDeployment, User
-from talos.database.session import get_session
-from talos.models.contract_deployment import ContractDeploymentRequest, ContractDeploymentResult
-from talos.utils.contract_deployment import calculate_contract_signature, deploy_contract
-
-from .base import SupervisedTool
-
-
-class ContractDeploymentArgs(BaseModel):
- bytecode: str = Field(..., description="Contract bytecode to deploy")
- salt: str = Field(..., description="Salt for CREATE2 deployment")
- chain_id: int = Field(42161, description="Chain ID to deploy on")
- constructor_args: Optional[list] = Field(None, description="Constructor arguments")
- check_duplicates: bool = Field(False, description="Check for duplicate deployment and prevent if found")
- gas_limit: Optional[int] = Field(None, description="Gas limit for deployment")
-
-
-class ContractDeploymentTool(SupervisedTool):
- name: str = "contract_deployment_tool"
- description: str = "Deploy smart contracts with optional duplicate checking"
- args_schema: type[BaseModel] = ContractDeploymentArgs
-
- def _run_unsupervised(
- self,
- bytecode: str,
- salt: str,
- chain_id: int = 42161,
- constructor_args: Optional[list] = None,
- check_duplicates: bool = False,
- gas_limit: Optional[int] = None,
- **kwargs: Any,
- ) -> ContractDeploymentResult:
- """Deploy a smart contract with optional duplicate checking."""
-
- signature = calculate_contract_signature(bytecode, salt)
-
- if check_duplicates:
- with get_session() as session:
- existing = (
- session.query(ContractDeployment)
- .filter(ContractDeployment.contract_signature == signature, ContractDeployment.chain_id == chain_id)
- .first()
- )
-
- if existing:
- return ContractDeploymentResult(
- contract_address=existing.contract_address,
- transaction_hash=existing.transaction_hash,
- contract_signature=signature,
- chain_id=chain_id,
- gas_used=None,
- was_duplicate=True,
- )
-
- private_key = os.getenv("DEPLOYMENT_PRIVATE_KEY")
- if not private_key:
- raise ValueError("DEPLOYMENT_PRIVATE_KEY environment variable required")
-
- request = ContractDeploymentRequest(
- bytecode=bytecode,
- salt=salt,
- chain_id=chain_id,
- constructor_args=constructor_args,
- gas_limit=gas_limit,
- gas_price=None,
- )
-
- result = deploy_contract(request, private_key)
-
- self._store_deployment(result, signature, salt, bytecode)
-
- return result
-
- def _store_deployment(self, result: ContractDeploymentResult, signature: str, salt: str, bytecode: str) -> None:
- """Store deployment record in database."""
- with get_session() as session:
- user = session.query(User).filter(User.user_id == "system").first()
- if not user:
- user = User(user_id="system", is_temporary=False)
- session.add(user)
- session.flush()
-
- existing_deployment = (
- session.query(ContractDeployment)
- .filter(
- ContractDeployment.contract_signature == signature, ContractDeployment.chain_id == result.chain_id
- )
- .first()
- )
-
- if not existing_deployment:
- deployment = ContractDeployment(
- user_id=user.id,
- contract_signature=signature,
- contract_address=result.contract_address,
- chain_id=result.chain_id,
- salt=salt,
- bytecode_hash=signature,
- transaction_hash=result.transaction_hash,
- deployment_metadata={"gas_used": result.gas_used, "was_duplicate": result.was_duplicate},
- )
- session.add(deployment)
- session.commit()
diff --git a/src/talos/tools/crypto_influencer_evaluator.py b/src/talos/tools/crypto_influencer_evaluator.py
deleted file mode 100644
index ce6e0b1c..00000000
--- a/src/talos/tools/crypto_influencer_evaluator.py
+++ /dev/null
@@ -1,248 +0,0 @@
-import json
-from datetime import datetime, timezone
-from typing import Any, List
-
-from langchain_openai import ChatOpenAI
-
-from ..models.evaluation import EvaluationResult
-from ..prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from .twitter_client import TwitterClient
-from .twitter_evaluator import TwitterAccountEvaluator
-
-
-class CryptoInfluencerEvaluator(TwitterAccountEvaluator):
- """
- Evaluates Twitter accounts specifically for crypto influencer relevance.
- Extends the base TwitterAccountEvaluator with crypto-specific metrics.
- """
-
- def __init__(self, twitter_client: TwitterClient, llm: Any = None, prompt_manager: FilePromptManager | None = None):
- self.twitter_client = twitter_client
- self.llm = llm or ChatOpenAI()
- if prompt_manager is None:
- import os
-
- prompts_dir = os.path.join(os.path.dirname(__file__), "..", "prompts")
- self.prompt_manager = FilePromptManager(prompts_dir)
- else:
- self.prompt_manager = prompt_manager
- self.crypto_keywords = [
- "bitcoin",
- "btc",
- "ethereum",
- "eth",
- "crypto",
- "cryptocurrency",
- "blockchain",
- "defi",
- "nft",
- "web3",
- "dao",
- "degen",
- "hodl",
- "altcoin",
- "trading",
- "yield",
- "staking",
- "mining",
- "wallet",
- ]
-
- def evaluate(self, user: Any) -> EvaluationResult:
- followers_count = user.public_metrics.get("followers_count", 0)
- following_count = user.public_metrics.get("following_count", 0)
- follower_following_ratio = followers_count / following_count if following_count > 0 else followers_count
- account_age_days = (datetime.now(timezone.utc) - user.created_at).days
- is_verified = user.verified
- has_custom_profile_image = bool(user.profile_image_url)
-
- base_score = 0
- if follower_following_ratio > 1:
- base_score += 25
- if account_age_days > 365:
- base_score += 25
- if is_verified:
- base_score += 25
- if has_custom_profile_image:
- base_score += 25
-
- base_data = {
- "follower_following_ratio": follower_following_ratio,
- "account_age_days": account_age_days,
- "is_verified": is_verified,
- "has_custom_profile_image": has_custom_profile_image,
- }
-
- try:
- user_timeline = self.twitter_client.get_user_timeline(user.username)
- crypto_relevance_score = self._calculate_crypto_relevance(user_timeline)
- engagement_score = self._calculate_engagement_score(user, user_timeline)
- authenticity_score = self._calculate_authenticity_score(user)
- influence_score = self._calculate_influence_score(user)
- except Exception:
- crypto_relevance_score = 0
- engagement_score = 0
- authenticity_score = base_score // 4
- influence_score = 0
- user_timeline = []
-
- crypto_score = min(
- 100,
- int(
- base_score * 0.2
- + crypto_relevance_score * 0.3
- + engagement_score * 0.25
- + authenticity_score * 0.15
- + influence_score * 0.1
- ),
- )
-
- return EvaluationResult(
- score=crypto_score,
- additional_data={
- **base_data,
- "crypto_relevance_score": crypto_relevance_score,
- "engagement_score": engagement_score,
- "authenticity_score": authenticity_score,
- "influence_score": influence_score,
- "crypto_content_percentage": self._get_crypto_content_percentage(user_timeline),
- "evaluation_type": "crypto_influencer",
- },
- )
-
- def _calculate_crypto_relevance(self, tweets: List[Any]) -> int:
- """Calculate how relevant the user's content is to crypto using LLM analysis (0-100)"""
- if not tweets:
- return 0
-
- tweets_text = "\n".join([f"- {tweet.text}" for tweet in tweets[:20]])
-
- try:
- prompt = self.prompt_manager.get_prompt("crypto_relevance_evaluation_prompt")
- if not prompt:
- return self._fallback_keyword_analysis(tweets)
-
- formatted_prompt = prompt.format(tweets_text=tweets_text)
-
- response = self.llm.invoke(formatted_prompt)
-
- try:
- content = response.content if isinstance(response.content, str) else str(response.content)
- result = json.loads(content.strip())
- crypto_focus = result.get("crypto_focus_score", 0)
- meaningfulness = result.get("meaningfulness_score", 0)
-
- combined_score = int(crypto_focus * 0.7 + meaningfulness * 0.3)
- return min(100, max(0, combined_score))
-
- except (json.JSONDecodeError, KeyError):
- return self._fallback_keyword_analysis(tweets)
-
- except Exception:
- return self._fallback_keyword_analysis(tweets)
-
- def _fallback_keyword_analysis(self, tweets: List[Any]) -> int:
- """Fallback keyword-based crypto relevance analysis"""
- if not tweets:
- return 0
-
- crypto_tweets = 0
- total_tweets = len(tweets)
-
- for tweet in tweets:
- tweet_text = tweet.text.lower()
- if any(keyword in tweet_text for keyword in self.crypto_keywords):
- crypto_tweets += 1
-
- crypto_percentage = (crypto_tweets / total_tweets) * 100
-
- if crypto_percentage >= 50:
- return 100
- elif crypto_percentage >= 30:
- return 80
- elif crypto_percentage >= 15:
- return 60
- elif crypto_percentage >= 5:
- return 40
- else:
- return 20
-
- def _calculate_engagement_score(self, user: Any, tweets: List[Any]) -> int:
- """Calculate engagement quality score (0-100)"""
- followers_count = user.public_metrics.get("followers_count", 0)
- if not tweets or followers_count == 0:
- return 0
-
- total_engagement = 0
- for tweet in tweets:
- engagement = (
- tweet.public_metrics.get("like_count", 0)
- + tweet.public_metrics.get("retweet_count", 0)
- + tweet.public_metrics.get("reply_count", 0)
- )
- total_engagement += engagement
-
- avg_engagement = total_engagement / len(tweets)
- engagement_rate = (avg_engagement / followers_count) * 100
-
- if engagement_rate >= 5:
- return 100
- elif engagement_rate >= 2:
- return 80
- elif engagement_rate >= 1:
- return 60
- elif engagement_rate >= 0.5:
- return 40
- else:
- return 20
-
- def _calculate_authenticity_score(self, user: Any) -> int:
- """Calculate authenticity score based on account indicators (0-100)"""
- score = 0
-
- account_age_days = (datetime.now(timezone.utc) - user.created_at).days
- if account_age_days > 1095:
- score += 30
- elif account_age_days > 365:
- score += 20
- elif account_age_days > 180:
- score += 10
-
- if user.verified:
- score += 25
-
- if user.profile_image_url:
- score += 15
- if user.description and len(user.description) > 20:
- score += 15
- if user.location:
- score += 10
- if user.url:
- score += 5
-
- return min(100, score)
-
- def _calculate_influence_score(self, user: Any) -> int:
- """Calculate influence score based on follower metrics (0-100)"""
- followers = user.public_metrics.get("followers_count", 0)
-
- if followers >= 100000:
- return 100
- elif followers >= 50000:
- return 80
- elif followers >= 10000:
- return 60
- elif followers >= 1000:
- return 40
- else:
- return 20
-
- def _get_crypto_content_percentage(self, tweets: List[Any]) -> float:
- """Get the percentage of tweets that contain crypto content"""
- if not tweets:
- return 0.0
-
- crypto_tweets = sum(
- 1 for tweet in tweets if any(keyword in tweet.text.lower() for keyword in self.crypto_keywords)
- )
- return (crypto_tweets / len(tweets)) * 100
diff --git a/src/talos/tools/dexscreener.py b/src/talos/tools/dexscreener.py
deleted file mode 100644
index 8e285943..00000000
--- a/src/talos/tools/dexscreener.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from pydantic import BaseModel, Field
-
-from ..models.dexscreener import DexscreenerData
-from ..utils.dexscreener import get_ohlcv_data
-from .base import SupervisedTool
-
-
-class DexscreenerToolArgs(BaseModel):
- token_address: str = Field(..., description="The address of the token to get the price for")
-
-
-class DexscreenerTool(SupervisedTool):
- name: str = "dexscreener_tool"
- description: str = "Gets the price of a token from dexscreener.com"
- args_schema: type[BaseModel] = DexscreenerToolArgs
-
- def _run_unsupervised(self, token_address: str, **kwargs: Any) -> DexscreenerData:
- """Gets the price of a token from dexscreener.com"""
- pair_address = "0xdaae914e4bae2aae4f536006c353117b90fb37e3"
- return get_ohlcv_data(pair_address)
diff --git a/src/talos/tools/document_loader.py b/src/talos/tools/document_loader.py
deleted file mode 100644
index e43d0f2d..00000000
--- a/src/talos/tools/document_loader.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from pydantic import BaseModel, Field, PrivateAttr
-
-from talos.data.dataset_manager import DatasetManager
-from talos.tools.base import SupervisedTool
-
-
-class DocumentLoaderArgs(BaseModel):
- name: str = Field(..., description="Name for the dataset")
- source: str = Field(..., description="IPFS hash or URL of the document")
- chunk_size: int = Field(1000, description="Maximum size of each text chunk")
- chunk_overlap: int = Field(200, description="Number of characters to overlap between chunks")
-
-
-class DocumentLoaderTool(SupervisedTool):
- """Tool for loading documents from IPFS or URLs into the DatasetManager."""
-
- name: str = "document_loader"
- description: str = "Loads documents from IPFS hashes or URLs and adds them to the dataset manager with intelligent chunking for RAG"
- args_schema: type[BaseModel] = DocumentLoaderArgs
- _dataset_manager: DatasetManager = PrivateAttr()
-
- def __init__(self, dataset_manager: DatasetManager, **kwargs):
- super().__init__(**kwargs)
- self._dataset_manager = dataset_manager
-
- def _run_unsupervised(
- self, name: str, source: str, chunk_size: int = 1000, chunk_overlap: int = 200, **kwargs: Any
- ) -> str:
- """Load document from IPFS hash or URL."""
- try:
- all_datasets = self._dataset_manager.get_all_datasets()
- if name in all_datasets:
- return f"Dataset '{name}' already exists. Use dataset_search to query existing content."
-
- if self._is_ipfs_hash(source):
- self._dataset_manager.add_document_from_ipfs(name, source, chunk_size, chunk_overlap)
- return f"Successfully loaded document from IPFS hash {source} into dataset '{name}'"
- else:
- self._dataset_manager.add_document_from_url(name, source, chunk_size, chunk_overlap)
- return f"Successfully loaded document from URL {source} into dataset '{name}'"
- except Exception as e:
- return f"Failed to load document: {str(e)}"
-
- def _is_ipfs_hash(self, source: str) -> bool:
- """Check if source is an IPFS hash."""
- if source.startswith("Qm") and len(source) == 46:
- return True
- if source.startswith("b") and len(source) > 46:
- return True
- if source.startswith("ipfs://"):
- return True
- return False
-
-
-class DatasetSearchArgs(BaseModel):
- query: str = Field(..., description="Search query")
- k: int = Field(5, description="Number of results to return")
-
-
-class DatasetSearchTool(SupervisedTool):
- """Tool for searching datasets in the DatasetManager."""
-
- name: str = "dataset_search"
- description: str = "Search for similar content in loaded datasets"
- args_schema: type[BaseModel] = DatasetSearchArgs
- _dataset_manager: DatasetManager = PrivateAttr()
-
- def __init__(self, dataset_manager: DatasetManager, **kwargs):
- super().__init__(**kwargs)
- self._dataset_manager = dataset_manager
-
- def _run_unsupervised(self, query: str, k: int = 5, **kwargs: Any) -> list[str]:
- """Search for similar content in the datasets."""
- try:
- results = self._dataset_manager.search(query, k)
- if not results:
- return ["No relevant documents found in datasets"]
- return results
- except Exception as e:
- return [f"Search failed: {str(e)}"]
diff --git a/src/talos/tools/general_influence_evaluator.py b/src/talos/tools/general_influence_evaluator.py
deleted file mode 100644
index 48d13c46..00000000
--- a/src/talos/tools/general_influence_evaluator.py
+++ /dev/null
@@ -1,460 +0,0 @@
-import json
-from datetime import datetime, timezone
-from typing import Any, List
-
-from langchain_openai import ChatOpenAI
-
-from ..models.evaluation import EvaluationResult
-from ..prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from .twitter_client import TwitterClient
-from .twitter_evaluator import TwitterAccountEvaluator
-
-
-class GeneralInfluenceEvaluator(TwitterAccountEvaluator):
- """
- Evaluates Twitter accounts for general influence and perception.
- Assesses follower metrics, engagement rates, authenticity, and credibility
- to determine overall influence perception.
- """
-
- def __init__(self, twitter_client: TwitterClient, llm: Any = None, prompt_manager: FilePromptManager | None = None):
- self.twitter_client = twitter_client
- self.llm = llm or ChatOpenAI()
- if prompt_manager is None:
- import os
-
- prompts_dir = os.path.join(os.path.dirname(__file__), "..", "prompts")
- self.prompt_manager = FilePromptManager(prompts_dir)
- else:
- self.prompt_manager = prompt_manager
-
- def evaluate(self, user: Any) -> EvaluationResult:
- followers_count = user.public_metrics.get("followers_count", 0)
- following_count = user.public_metrics.get("following_count", 0)
- follower_following_ratio = followers_count / following_count if following_count > 0 else followers_count
- account_age_days = (datetime.now(timezone.utc) - user.created_at).days
- is_verified = user.verified
- has_custom_profile_image = bool(user.profile_image_url)
-
- base_score = 0
- if follower_following_ratio > 1:
- base_score += 25
- if account_age_days > 365:
- base_score += 25
- if is_verified:
- base_score += 25
- if has_custom_profile_image:
- base_score += 25
-
- base_data = {
- "follower_following_ratio": follower_following_ratio,
- "account_age_days": account_age_days,
- "is_verified": is_verified,
- "has_custom_profile_image": has_custom_profile_image,
- }
-
- try:
- user_timeline = self.twitter_client.get_user_timeline(user.username)
- content_quality_score = self._calculate_content_quality_score(user_timeline)
- engagement_score = self._calculate_engagement_score(user, user_timeline)
- authenticity_score = self._calculate_authenticity_score(user, user_timeline)
- influence_score = self._calculate_influence_score(user)
- credibility_score = self._calculate_credibility_score(user, user_timeline)
- except Exception:
- content_quality_score = 0
- engagement_score = 0
- authenticity_score = base_score // 4
- influence_score = 0
- credibility_score = 0
- user_timeline = []
-
- overall_score = min(
- 100,
- int(
- base_score * 0.15
- + content_quality_score * 0.20
- + engagement_score * 0.25
- + authenticity_score * 0.15
- + influence_score * 0.15
- + credibility_score * 0.10
- ),
- )
-
- return EvaluationResult(
- score=overall_score,
- additional_data={
- **base_data,
- "content_quality_score": content_quality_score,
- "engagement_score": engagement_score,
- "authenticity_score": authenticity_score,
- "influence_score": influence_score,
- "credibility_score": credibility_score,
- "total_tweets_analyzed": len(user_timeline),
- "evaluation_type": "general_influence",
- },
- )
-
- def _calculate_content_quality_score(self, tweets: List[Any]) -> int:
- """Calculate content quality score using LLM analysis (0-100)"""
- if not tweets:
- return 0
-
- tweets_text = "\n".join([f"- {tweet.text}" for tweet in tweets[:20]])
-
- try:
- prompt = self.prompt_manager.get_prompt("general_influence_content_quality_prompt")
- if not prompt:
- return self._fallback_content_analysis(tweets)
-
- formatted_prompt = prompt.format(tweets_text=tweets_text)
-
- response = self.llm.invoke(formatted_prompt)
-
- try:
- content = response.content if isinstance(response.content, str) else str(response.content)
- result = json.loads(content.strip())
- quality_score = result.get("content_quality_score", 0)
- originality_score = result.get("originality_score", 0)
-
- combined_score = int(quality_score * 0.6 + originality_score * 0.4)
- return min(100, max(0, combined_score))
-
- except (json.JSONDecodeError, KeyError):
- return self._fallback_content_analysis(tweets)
-
- except Exception:
- return self._fallback_content_analysis(tweets)
-
- def _fallback_content_analysis(self, tweets: List[Any]) -> int:
- """Fallback content quality analysis based on basic metrics"""
- if not tweets:
- return 0
-
- total_length = sum(len(tweet.text) for tweet in tweets)
- avg_length = total_length / len(tweets)
-
- if avg_length >= 200:
- length_score = 80
- elif avg_length >= 100:
- length_score = 60
- elif avg_length >= 50:
- length_score = 40
- else:
- length_score = 20
-
- original_tweets = [tweet for tweet in tweets if not tweet.text.startswith("RT @")]
- originality_ratio = len(original_tweets) / len(tweets) if tweets else 0
-
- if originality_ratio >= 0.8:
- originality_score = 80
- elif originality_ratio >= 0.6:
- originality_score = 60
- elif originality_ratio >= 0.4:
- originality_score = 40
- else:
- originality_score = 20
-
- return int(length_score * 0.6 + originality_score * 0.4)
-
- def _calculate_engagement_score(self, user: Any, tweets: List[Any]) -> int:
- """Calculate engagement quality score (0-100)"""
- followers_count = user.public_metrics.get("followers_count", 0)
- if not tweets or followers_count == 0:
- return 0
-
- total_engagement = 0
- for tweet in tweets:
- engagement = (
- tweet.public_metrics.get("like_count", 0)
- + tweet.public_metrics.get("retweet_count", 0)
- + tweet.public_metrics.get("reply_count", 0)
- )
- total_engagement += engagement
-
- avg_engagement = total_engagement / len(tweets)
- engagement_rate = (avg_engagement / followers_count) * 100
-
- if engagement_rate >= 5:
- return 100
- elif engagement_rate >= 2:
- return 80
- elif engagement_rate >= 1:
- return 60
- elif engagement_rate >= 0.5:
- return 40
- else:
- return 20
-
- def _calculate_authenticity_score(self, user: Any, tweets: List[Any] | None = None) -> int:
- """Calculate enhanced authenticity score with advanced bot detection (0-100)"""
- if tweets is None:
- tweets = []
-
- base_score = self._calculate_base_authenticity(user)
-
- engagement_score = self._calculate_engagement_authenticity(user, tweets)
-
- content_score = self._calculate_content_authenticity(tweets)
-
- temporal_score = self._calculate_temporal_authenticity(tweets)
-
- composite_score = int(
- base_score * 0.40 +
- engagement_score * 0.25 +
- content_score * 0.20 +
- temporal_score * 0.15
- )
-
- return min(100, max(0, composite_score))
-
- def _calculate_base_authenticity(self, user: Any) -> int:
- """Calculate base authenticity score from account indicators (0-100)"""
- score = 0
-
- account_age_days = (datetime.now(timezone.utc) - user.created_at).days
- if account_age_days > 1825: # 5+ years
- score += 35
- elif account_age_days > 1095: # 3+ years
- score += 30
- elif account_age_days > 730: # 2+ years
- score += 25
- elif account_age_days > 365: # 1+ year
- score += 20
- elif account_age_days > 180: # 6+ months
- score += 10
- elif account_age_days < 30: # Suspicious new accounts
- score -= 10
-
- if user.verified:
- score += 25
-
- if user.profile_image_url and not user.profile_image_url.endswith('default_profile_images/'):
- score += 15
- if user.description and len(user.description) > 20:
- score += 10
- if user.location:
- score += 5
- if user.url:
- score += 5
-
- following = user.public_metrics.get("following_count", 0)
-
- if following > 50000:
- score -= 15
- elif following > 10000:
- score -= 5
-
- return min(100, max(0, score))
-
- def _calculate_engagement_authenticity(self, user: Any, tweets: List[Any]) -> int:
- """Analyze engagement patterns for authenticity indicators (0-100)"""
- if not tweets:
- return 50 # Neutral score when no data available
-
- score = 50 # Start with neutral
- followers = user.public_metrics.get("followers_count", 0)
-
- if followers == 0:
- return 20 # Very suspicious
-
- engagement_rates = []
- for tweet in tweets[:20]: # Analyze recent tweets
- engagement = (
- tweet.public_metrics.get("like_count", 0) +
- tweet.public_metrics.get("retweet_count", 0) +
- tweet.public_metrics.get("reply_count", 0)
- )
- rate = (engagement / followers) * 100
- engagement_rates.append(rate)
-
- if engagement_rates:
- avg_rate = sum(engagement_rates) / len(engagement_rates)
- rate_variance = sum((r - avg_rate) ** 2 for r in engagement_rates) / len(engagement_rates)
-
- if rate_variance < 0.1: # Very consistent
- score += 20
- elif rate_variance < 1.0: # Reasonably consistent
- score += 10
- elif rate_variance > 10.0: # Highly inconsistent (suspicious)
- score -= 15
-
- if avg_rate > 10: # >10% engagement rate is unusual
- score -= 20
- elif avg_rate > 5:
- score -= 10
- elif avg_rate < 0.1: # Very low engagement also suspicious
- score -= 10
-
- like_counts = [t.public_metrics.get("like_count", 0) for t in tweets[:10]]
- retweet_counts = [t.public_metrics.get("retweet_count", 0) for t in tweets[:10]]
-
- if sum(like_counts) > 0 and sum(retweet_counts) > 0:
- like_rt_ratio = sum(like_counts) / sum(retweet_counts)
- if 2 <= like_rt_ratio <= 20: # Normal range
- score += 15
- else: # Unusual ratios
- score -= 10
-
- return min(100, max(0, score))
-
- def _calculate_content_authenticity(self, tweets: List[Any]) -> int:
- """Analyze content patterns for authenticity indicators (0-100)"""
- if not tweets:
- return 50 # Neutral score when no data available
-
- score = 50 # Start with neutral
-
- tweet_texts = [tweet.text for tweet in tweets[:20]]
- unique_texts = set(tweet_texts)
-
- if len(tweet_texts) > 0:
- uniqueness_ratio = len(unique_texts) / len(tweet_texts)
- if uniqueness_ratio > 0.9: # High uniqueness
- score += 25
- elif uniqueness_ratio > 0.7:
- score += 15
- elif uniqueness_ratio < 0.5: # Low uniqueness (suspicious)
- score -= 20
-
- original_tweets = [t for t in tweets if not t.text.startswith("RT @")]
-
- if len(tweets) > 0:
- original_ratio = len(original_tweets) / len(tweets)
- if original_ratio > 0.7: # Mostly original content
- score += 20
- elif original_ratio < 0.3: # Mostly retweets (suspicious)
- score -= 15
-
- hashtag_counts = []
- for tweet in tweets[:10]:
- hashtag_count = tweet.text.count('#')
- hashtag_counts.append(hashtag_count)
-
- if hashtag_counts:
- avg_hashtags = sum(hashtag_counts) / len(hashtag_counts)
- if avg_hashtags > 5: # Excessive hashtag use
- score -= 15
- elif 1 <= avg_hashtags <= 3: # Normal hashtag use
- score += 10
-
- if original_tweets:
- avg_length = sum(len(t.text) for t in original_tweets) / len(original_tweets)
- if avg_length > 100: # Longer, more thoughtful tweets
- score += 15
- elif avg_length < 30: # Very short tweets (suspicious)
- score -= 10
-
- return min(100, max(0, score))
-
- def _calculate_temporal_authenticity(self, tweets: List[Any]) -> int:
- """Analyze temporal posting patterns for authenticity indicators (0-100)"""
- if not tweets:
- return 50 # Neutral score when no data available
-
- score = 50 # Start with neutral
-
- # Analyze posting frequency
- tweets_with_dates = [t for t in tweets if t.created_at]
- if len(tweets_with_dates) < 2:
- return score
-
- timestamps = []
- for tweet in tweets_with_dates[:20]:
- try:
- if isinstance(tweet.created_at, str):
- timestamp = datetime.fromisoformat(tweet.created_at.replace('Z', '+00:00'))
- else:
- timestamp = tweet.created_at
- timestamps.append(timestamp)
- except (ValueError, AttributeError, TypeError):
- continue
-
- if len(timestamps) < 2:
- return score
-
- timestamps.sort()
- intervals = []
- for i in range(1, len(timestamps)):
- interval = (timestamps[i] - timestamps[i-1]).total_seconds()
- intervals.append(interval)
-
- if intervals:
- avg_interval = sum(intervals) / len(intervals)
- interval_variance = sum((i - avg_interval) ** 2 for i in intervals) / len(intervals)
-
- if interval_variance < (avg_interval * 0.1) ** 2 and len(intervals) > 5:
- score -= 20 # Too regular
- elif interval_variance > (avg_interval * 2) ** 2:
- score += 10 # Natural variance
-
- if avg_interval < 300: # Less than 5 minutes average
- score -= 25
- elif avg_interval < 3600: # Less than 1 hour average
- score -= 10
-
- return min(100, max(0, score))
-
- def _calculate_influence_score(self, user: Any) -> int:
- """Calculate influence score based on follower metrics (0-100)"""
- followers = user.public_metrics.get("followers_count", 0)
- following = user.public_metrics.get("following_count", 0)
-
- if followers >= 1000000: # 1M+
- follower_score = 100
- elif followers >= 100000: # 100K+
- follower_score = 80
- elif followers >= 10000: # 10K+
- follower_score = 60
- elif followers >= 1000: # 1K+
- follower_score = 40
- else:
- follower_score = 20
-
- if following > 0:
- ratio = followers / following
- if ratio >= 10:
- ratio_bonus = 20
- elif ratio >= 5:
- ratio_bonus = 15
- elif ratio >= 2:
- ratio_bonus = 10
- elif ratio >= 1:
- ratio_bonus = 5
- else:
- ratio_bonus = 0
- else:
- ratio_bonus = 20 # Following 0 people is unusual but could indicate high status
-
- return min(100, follower_score + ratio_bonus)
-
- def _calculate_credibility_score(self, user: Any, tweets: List[Any]) -> int:
- """Calculate credibility score based on account and content indicators (0-100)"""
- score = 0
-
- if user.verified:
- score += 40
-
- if user.description:
- score += 15
- if user.location:
- score += 10
- if user.url:
- score += 10
-
- if tweets:
- tweet_count = user.public_metrics.get("tweet_count", 0)
- account_age_days = (datetime.now(timezone.utc) - user.created_at).days
-
- if account_age_days > 0:
- tweets_per_day = tweet_count / account_age_days
- if 0.5 <= tweets_per_day <= 10: # Reasonable posting frequency
- score += 15
- elif tweets_per_day <= 20: # Moderate posting
- score += 10
- else: # Too much or too little posting
- score += 5
-
- if user.url and any(domain in user.url for domain in ['.com', '.org', '.edu', '.gov']):
- score += 10
-
- return min(100, score)
diff --git a/src/talos/tools/gitbook.py b/src/talos/tools/gitbook.py
deleted file mode 100644
index 0b4855ec..00000000
--- a/src/talos/tools/gitbook.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Any
-
-import requests
-from pydantic import BaseModel, Field, PrivateAttr
-
-from .base import SupervisedTool
-from ..settings import GitBookSettings
-
-
-class GitBookToolName(str, Enum):
- READ_PAGE = "read_page"
- UPDATE_PAGE = "update_page"
-
-
-class GitBookToolArgs(BaseModel):
- tool_name: GitBookToolName = Field(..., description="The name of the tool to run")
- page_url: str = Field(..., description="The URL of the GitBook page")
- content: str | None = Field(None, description="The content to update the page with")
-
-
-class GitBookTool(SupervisedTool):
- name = "gitbook_tool"
- description = "Provides tools for interacting with the GitBook API."
- args_schema: type[BaseModel] = GitBookToolArgs
- _session: requests.Session = PrivateAttr()
-
- def model_post_init(self, __context: Any) -> None:
- settings = GitBookSettings()
- self._session = requests.Session()
- self._session.headers.update({"Authorization": f"Bearer {settings.GITBOOK_API_KEY}"})
-
- def read_page(self, page_url: str) -> str:
- """
- Reads a GitBook page.
- """
- response = self._session.get(page_url)
- response.raise_for_status()
- return response.text
-
- def update_page(self, page_url: str, content: str) -> str:
- """
- Updates a GitBook page.
- """
- response = self._session.put(page_url, json={"content": content})
- response.raise_for_status()
- return f"Updated GitBook page: {page_url}"
-
- def _run_unsupervised(self, tool_name: str, **kwargs: Any) -> str:
- if tool_name == "read_page":
- return self.read_page(**kwargs)
- elif tool_name == "update_page":
- return self.update_page(**kwargs)
- else:
- raise ValueError(f"Unknown tool: {tool_name}")
diff --git a/src/talos/tools/github/__init__.py b/src/talos/tools/github/__init__.py
deleted file mode 100644
index 5fc6b5bf..00000000
--- a/src/talos/tools/github/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .tools import GithubTools
-
-__all__ = ["GithubTools"]
diff --git a/src/talos/tools/github/tools.py b/src/talos/tools/github/tools.py
deleted file mode 100644
index f1a4aeff..00000000
--- a/src/talos/tools/github/tools.py
+++ /dev/null
@@ -1,252 +0,0 @@
-from typing import Any
-import time
-import logging
-
-from github import Auth, Github
-from pydantic import BaseModel, Field, PrivateAttr
-
-from ...settings import GitHubSettings
-from ...utils.validation import validate_github_username, validate_github_repo_name, mask_sensitive_data
-from ...utils.http_client import SecureHTTPClient
-
-logger = logging.getLogger(__name__)
-
-
-class GithubTools(BaseModel):
- """
- A collection of tools for interacting with the Github API.
- """
-
- token: str | None = Field(default_factory=lambda: GitHubSettings().GITHUB_API_TOKEN)
- _github: Github = PrivateAttr()
- _http_client: SecureHTTPClient = PrivateAttr()
- _headers: dict[str, str] = PrivateAttr()
- _repo_cache: dict[str, tuple[Any, float]] = PrivateAttr(default_factory=dict)
- _cache_ttl: int = PrivateAttr(default=300)
-
- def model_post_init(self, __context: Any) -> None:
- if not self.token:
- raise ValueError("Github token not provided.")
-
- self._github = Github(auth=Auth.Token(self.token))
- self._http_client = SecureHTTPClient()
- self._headers = {"Authorization": f"token {self.token}"}
-
- masked_token = mask_sensitive_data(self.token)
- logger.info(f"GitHub client initialized with token: {masked_token}")
-
- def _validate_repo_params(self, user: str, project: str) -> None:
- """Validate repository parameters."""
- if not validate_github_username(user):
- raise ValueError(f"Invalid GitHub username: {user}")
- if not validate_github_repo_name(project):
- raise ValueError(f"Invalid GitHub repository name: {project}")
-
- def _get_repo_cached(self, repo_key: str):
- """Get repository with caching to avoid repeated API calls."""
- current_time = time.time()
-
- if repo_key in self._repo_cache:
- repo, cached_time = self._repo_cache[repo_key]
- if current_time - cached_time < self._cache_ttl:
- return repo
-
- repo = self._github.get_repo(repo_key)
- self._repo_cache[repo_key] = (repo, current_time)
- return repo
-
- def get_open_issues(self, user: str, project: str) -> list[dict[str, Any]]:
- """
- Gets all open issues for a given repository.
- """
- self._validate_repo_params(user, project)
- repo = self._get_repo_cached(f"{user}/{project}")
- return [
- {"number": issue.number, "title": issue.title, "url": issue.html_url}
- for issue in repo.get_issues(state="open")
- ]
-
- def get_all_pull_requests(self, user: str, project: str, state: str = "open") -> list[dict[str, Any]]:
- """
- Gets all pull requests for a given repository.
-
- :param state: Can be one of 'open', 'closed', or 'all'.
- """
- self._validate_repo_params(user, project)
- if state not in ["open", "closed", "all"]:
- raise ValueError(f"Invalid state: {state}. Must be 'open', 'closed', or 'all'")
- repo = self._get_repo_cached(f"{user}/{project}")
- return [{"number": pr.number, "title": pr.title, "url": pr.html_url} for pr in repo.get_pulls(state=state)]
-
- def get_issue_comments(self, user: str, project: str, issue_number: int) -> list[dict[str, Any]]:
- """
- Gets all comments for a given issue.
- """
- self._validate_repo_params(user, project)
- if not isinstance(issue_number, int) or issue_number <= 0:
- raise ValueError(f"Invalid issue number: {issue_number}")
- repo = self._get_repo_cached(f"{user}/{project}")
- issue = repo.get_issue(number=issue_number)
- comments = []
- for comment in issue.get_comments():
- comments.append(
- {
- "user": comment.user.login,
- "comment": comment.body,
- "reply_to": None,
- }
- )
- return comments
-
- def get_pr_comments(self, user: str, project: str, pr_number: int) -> list[dict[str, Any]]:
- """
- Gets all comments for a given pull request.
- """
- self._validate_repo_params(user, project)
- if not isinstance(pr_number, int) or pr_number <= 0:
- raise ValueError(f"Invalid PR number: {pr_number}")
- repo = self._get_repo_cached(f"{user}/{project}")
- pr = repo.get_pull(pr_number)
- comments = []
- for comment in pr.get_issue_comments():
- comments.append(
- {
- "user": comment.user.login,
- "comment": comment.body,
- }
- )
- return comments
-
- def reply_to_issue(self, user: str, project: str, issue_number: int, comment: str) -> None:
- """
- Replies to a given issue.
- """
- self._validate_repo_params(user, project)
- if not isinstance(issue_number, int) or issue_number <= 0:
- raise ValueError(f"Invalid issue number: {issue_number}")
- if not comment or not comment.strip():
- raise ValueError("Comment cannot be empty")
- from ...utils.validation import sanitize_user_input
- comment = sanitize_user_input(comment, max_length=65536)
- repo = self._get_repo_cached(f"{user}/{project}")
- issue = repo.get_issue(number=issue_number)
- issue.create_comment(comment)
-
- def get_pr_files(self, user: str, project: str, pr_number: int) -> list[str]:
- """
- Gets all files for a given pull request.
- """
- self._validate_repo_params(user, project)
- if not isinstance(pr_number, int) or pr_number <= 0:
- raise ValueError(f"Invalid PR number: {pr_number}")
- repo = self._get_repo_cached(f"{user}/{project}")
- pr = repo.get_pull(number=pr_number)
- return [file.filename for file in pr.get_files()]
-
- def get_pr_diff(self, user: str, project: str, pr_number: int) -> str:
- """
- Gets the diff for a given pull request.
- """
- self._validate_repo_params(user, project)
- if not isinstance(pr_number, int) or pr_number <= 0:
- raise ValueError(f"Invalid PR number: {pr_number}")
- repo = self._get_repo_cached(f"{user}/{project}")
- pr = repo.get_pull(number=pr_number)
- response = self._http_client.get(pr.patch_url, headers=self._headers)
- return response.text
-
- def get_project_structure(self, user: str, project: str, path: str = "") -> list[str]:
- """
- Gets the project structure for a given repository.
- """
- self._validate_repo_params(user, project)
- from ...utils.validation import sanitize_user_input
- path = sanitize_user_input(path, max_length=255)
- repo = self._get_repo_cached(f"{user}/{project}")
- contents = repo.get_contents(path)
- if isinstance(contents, list):
- return [content.path for content in contents]
- return [contents.path]
-
- def get_file_content(self, user: str, project: str, filepath: str) -> str:
- """
- Gets the content of a file.
- """
- self._validate_repo_params(user, project)
- if not filepath or not filepath.strip():
- raise ValueError("Filepath cannot be empty")
- from ...utils.validation import sanitize_user_input
- filepath = sanitize_user_input(filepath, max_length=255)
- repo = self._get_repo_cached(f"{user}/{project}")
- content = repo.get_contents(filepath)
- if isinstance(content, list):
- raise ValueError("Path is a directory, not a file.")
- return content.decoded_content.decode()
-
- def merge_pr(self, user: str, project: str, pr_number: int) -> None:
- """
- Merges a pull request.
- """
- self._validate_repo_params(user, project)
- if not isinstance(pr_number, int) or pr_number <= 0:
- raise ValueError(f"Invalid PR number: {pr_number}")
- repo = self._get_repo_cached(f"{user}/{project}")
- pr = repo.get_pull(number=pr_number)
- pr.merge()
-
- def review_pr(self, user: str, project: str, pr_number: int, feedback: str) -> None:
- """
- Reviews a pull request.
- """
- self._validate_repo_params(user, project)
- if not isinstance(pr_number, int) or pr_number <= 0:
- raise ValueError(f"Invalid PR number: {pr_number}")
- if not feedback or not feedback.strip():
- raise ValueError("Feedback cannot be empty")
- from ...utils.validation import sanitize_user_input
- feedback = sanitize_user_input(feedback, max_length=65536)
- repo = self._get_repo_cached(f"{user}/{project}")
- pr = repo.get_pull(number=pr_number)
- pr.create_review(body=feedback, event="COMMENT")
-
- def comment_on_pr(self, user: str, project: str, pr_number: int, comment: str) -> None:
- """
- Comments on a pull request.
- """
- self._validate_repo_params(user, project)
- if not isinstance(pr_number, int) or pr_number <= 0:
- raise ValueError(f"Invalid PR number: {pr_number}")
- if not comment or not comment.strip():
- raise ValueError("Comment cannot be empty")
- from ...utils.validation import sanitize_user_input
- comment = sanitize_user_input(comment, max_length=65536)
- repo = self._get_repo_cached(f"{user}/{project}")
- pr = repo.get_pull(number=pr_number)
- pr.create_issue_comment(comment)
-
- def approve_pr(self, user: str, project: str, pr_number: int) -> None:
- """
- Approves a pull request.
- """
- self._validate_repo_params(user, project)
- if not isinstance(pr_number, int) or pr_number <= 0:
- raise ValueError(f"Invalid PR number: {pr_number}")
- repo = self._get_repo_cached(f"{user}/{project}")
- pr = repo.get_pull(number=pr_number)
- pr.create_review(event="APPROVE")
-
- def create_issue(self, user: str, project: str, title: str, body: str) -> dict[str, Any]:
- """
- Creates a new issue.
- """
- self._validate_repo_params(user, project)
- if not title or not title.strip():
- raise ValueError("Issue title cannot be empty")
- if not body or not body.strip():
- raise ValueError("Issue body cannot be empty")
- from ...utils.validation import sanitize_user_input
- title = sanitize_user_input(title, max_length=256)
- body = sanitize_user_input(body, max_length=65536)
- repo = self._get_repo_cached(f"{user}/{project}")
- issue = repo.create_issue(title=title, body=body)
- return {"number": issue.number, "title": issue.title, "url": issue.html_url}
diff --git a/src/talos/tools/ipfs.py b/src/talos/tools/ipfs.py
deleted file mode 100644
index 6fc59a37..00000000
--- a/src/talos/tools/ipfs.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from enum import Enum
-
-import ipfshttpclient
-from langchain.tools import BaseTool
-from pydantic import BaseModel, Field
-
-
-class IpfsToolName(str, Enum):
- ADD_CONTENT = "add_content"
- GET_CONTENT = "get_content"
-
-
-class IpfsToolArgs(BaseModel):
- tool_name: IpfsToolName = Field(..., description="The name of the tool to run")
- content: str | None = Field(None, description="The content to add to IPFS")
- hash: str | None = Field(None, description="The hash of the content to get from IPFS")
-
-
-class IpfsTool(BaseTool):
- name: str = "ipfs_tool"
- description: str = "Provides tools for interacting with IPFS."
- args_schema: type[BaseModel] = IpfsToolArgs
-
- def __init__(self):
- super().__init__()
- self.client = ipfshttpclient.connect()
-
- def add_content(self, content: str) -> str:
- """
- Adds content to IPFS.
- """
- return self.client.add_str(content)
-
- def get_content(self, hash: str) -> str:
- """
- Gets content from IPFS.
- """
- return self.client.cat(hash).decode()
-
- def _run(self, tool_name: str, **kwargs):
- if tool_name == "add_content":
- return self.add_content(**kwargs)
- elif tool_name == "get_content":
- return self.get_content(**kwargs)
- else:
- raise ValueError(f"Unknown tool: {tool_name}")
diff --git a/src/talos/tools/memory_tool.py b/src/talos/tools/memory_tool.py
deleted file mode 100644
index 3083d1fb..00000000
--- a/src/talos/tools/memory_tool.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Type
-
-from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field
-
-if TYPE_CHECKING:
- from talos.core.agent import Agent
-
-
-class AddMemorySchema(BaseModel):
- """Pydantic schema for adding a memory."""
-
- description: str = Field(..., description="The description of the memory to add.")
-
-
-class AddMemoryTool(BaseTool):
- """Tool for adding a memory to the agent's memory."""
-
- name: str = "add_memory"
- description: str = "Adds a memory to the agent's memory."
- args_schema: Type[BaseModel] = AddMemorySchema
- agent: Agent
-
- def _run(self, description: str) -> str:
- """Adds a memory to the agent's memory."""
- self.agent.add_memory(description)
- return f"Stored in memory: {description}"
diff --git a/src/talos/tools/supervised_tool.py b/src/talos/tools/supervised_tool.py
deleted file mode 100644
index bf7d66f6..00000000
--- a/src/talos/tools/supervised_tool.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from langchain_core.tools import BaseTool
-
-from talos.hypervisor.supervisor import Supervisor
-
-
-class SupervisedTool(BaseTool):
- """
- A tool that is supervised by a hypervisor.
- """
-
- tool: BaseTool
- supervisor: Supervisor | None = None
- messages: list
-
- def set_supervisor(self, supervisor: Supervisor | None):
- """
- Sets the supervisor for the tool.
- """
- self.supervisor = supervisor
-
- def _run(self, *args: Any, **kwargs: Any) -> Any:
- """
- Runs the tool.
- """
- tool_input = args[0] if args else kwargs
- if self.supervisor:
- approved, error_message = self.supervisor.approve(self.name, tool_input)
- if approved:
- return self.tool.run(tool_input, **kwargs)
- else:
- return error_message or f"Tool call to '{self.name}' denied by supervisor."
- return self.tool.run(tool_input, **kwargs)
diff --git a/src/talos/tools/tool_manager.py b/src/talos/tools/tool_manager.py
deleted file mode 100644
index c1672771..00000000
--- a/src/talos/tools/tool_manager.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from __future__ import annotations
-
-from langchain_core.tools import BaseTool
-
-
-class ToolManager:
- """
- A class for managing and discovering tools for the Talos agent.
- """
-
- def __init__(self) -> None:
- self.tools: dict[str, BaseTool] = {}
-
- def register_tool(self, tool: BaseTool) -> None:
- """
- Registers a tool with the ToolManager.
- """
- if tool.name in self.tools:
- raise ValueError(f"Tool with name '{tool.name}' already registered.")
- self.tools[tool.name] = tool
-
- def unregister_tool(self, tool_name: str) -> None:
- """
- Unregisters a tool from the ToolManager.
- """
- if tool_name not in self.tools:
- raise ValueError(f"Tool with name '{tool_name}' not found.")
- del self.tools[tool_name]
-
- def get_tool(self, tool_name: str) -> BaseTool:
- """
- Gets a tool by name.
- """
- if tool_name not in self.tools:
- raise ValueError(f"Tool with name '{tool_name}' not found.")
- return self.tools[tool_name]
-
- def get_all_tools(self) -> list[BaseTool]:
- """
- Gets all registered tools.
- """
- return list(self.tools.values())
diff --git a/src/talos/tools/twitter.py b/src/talos/tools/twitter.py
deleted file mode 100644
index 3c3c28ad..00000000
--- a/src/talos/tools/twitter.py
+++ /dev/null
@@ -1,214 +0,0 @@
-from enum import Enum
-from typing import Any, Optional
-
-import tweepy
-from googleapiclient import discovery
-from langchain.tools import BaseTool
-from pydantic import BaseModel, ConfigDict, Field
-
-from ..models.evaluation import EvaluationResult
-from ..settings import PerspectiveSettings
-from ..skills.twitter_persona import TwitterPersonaSkill
-from .twitter_client import TweepyClient, TwitterClient
-from .twitter_evaluator import DefaultTwitterAccountEvaluator, TwitterAccountEvaluator
-
-MODERATION_THRESHOLD = 0.7
-
-
-class TwitterToolName(str, Enum):
- POST_TWEET = "post_tweet"
- GET_ALL_REPLIES = "get_all_replies"
- REPLY_TO_TWEET = "reply_to_tweet"
- GET_FOLLOWER_COUNT = "get_follower_count"
- GET_FOLLOWING_COUNT = "get_following_count"
- GET_TWEET_ENGAGEMENT = "get_tweet_engagement"
- EVALUATE_ACCOUNT = "evaluate_account"
- EVALUATE_CRYPTO_INFLUENCER = "evaluate_crypto_influencer"
- GENERATE_PERSONA_PROMPT = "generate_persona_prompt"
-
-
-class TwitterToolArgs(BaseModel):
- tool_name: TwitterToolName = Field(..., description="The name of the tool to run")
- tweet: str | None = Field(None, description="The content of the tweet")
- tweet_id: str | None = Field(None, description="The ID of the tweet")
- username: str | None = Field(None, description="The username of the user")
- search_query: str | None = Field(None, description="The search query to use")
-
-
-class TwitterTool(BaseTool):
- name: str = "twitter_tool"
- description: str = "Provides tools for interacting with the Twitter API."
- args_schema: type[BaseModel] = TwitterToolArgs
- twitter_client: Optional[TwitterClient] = None
- account_evaluator: Optional[TwitterAccountEvaluator] = None
- perspective_client: Optional[Any] = None
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- def __init__(
- self,
- twitter_client: Optional[TwitterClient] = None,
- account_evaluator: Optional[TwitterAccountEvaluator] = None,
- ):
- super().__init__()
- self.twitter_client = twitter_client or TweepyClient()
- self.account_evaluator = account_evaluator or DefaultTwitterAccountEvaluator()
- self.perspective_client = self._initialize_perspective_client()
-
- def _initialize_perspective_client(self) -> Optional[Any]:
- """Initializes the Perspective API client."""
- try:
- settings = PerspectiveSettings()
- if not settings.PERSPECTIVE_API_KEY:
- return None
- return discovery.build(
- "commentanalyzer",
- "v1alpha1",
- developerKey=settings.PERSPECTIVE_API_KEY,
- discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
- static_discovery=False,
- )
- except ValueError:
- return None
-
- def post_tweet(self, tweet: str) -> str:
- """Posts a tweet."""
- from ..utils.validation import sanitize_user_input
- if not tweet or not tweet.strip():
- raise ValueError("Tweet content cannot be empty")
- if len(tweet) > 280:
- raise ValueError("Tweet content exceeds 280 characters")
- tweet = sanitize_user_input(tweet, max_length=280)
-
- if self.perspective_client:
- if not self.is_content_appropriate(tweet):
- return "Tweet not sent. Content is inappropriate."
- assert self.twitter_client is not None
- self.twitter_client.post_tweet(tweet)
- return f"Posted tweet: {tweet[:50]}{'...' if len(tweet) > 50 else ''}"
-
- def get_all_replies(self, tweet_id: str) -> list[tweepy.Tweet]:
- """Gets all replies to a tweet."""
- # This functionality is not yet migrated to the new TwitterClient
- raise NotImplementedError
-
- def reply_to_tweet(self, tweet_id: str, tweet: str) -> str:
- """Replies to a tweet."""
- from ..utils.validation import sanitize_user_input
- if not tweet_id or not tweet_id.strip():
- raise ValueError("Tweet ID cannot be empty")
- if not tweet or not tweet.strip():
- raise ValueError("Tweet content cannot be empty")
- if len(tweet) > 280:
- raise ValueError("Tweet content exceeds 280 characters")
- tweet = sanitize_user_input(tweet, max_length=280)
-
- if self.perspective_client:
- if not self.is_content_appropriate(tweet):
- return "Tweet not sent. Content is inappropriate."
- assert self.twitter_client is not None
- self.twitter_client.reply_to_tweet(tweet_id, tweet)
- return f"Replied to tweet {tweet_id}: {tweet[:50]}{'...' if len(tweet) > 50 else ''}"
-
- def is_content_appropriate(self, text: str) -> bool:
- """
- Checks if the content is appropriate using the Perspective API.
-
- Args:
- text: The text to analyze.
-
- Returns:
- True if the content is appropriate, False otherwise.
- """
- if not self.perspective_client:
- return True
-
- analyze_request = {
- "comment": {"text": text},
- "requestedAttributes": {"TOXICITY": {}},
- }
-
- response = self.perspective_client.comments().analyze(body=analyze_request).execute()
- toxicity_score = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"]
-
- return toxicity_score < MODERATION_THRESHOLD
-
- def get_follower_count(self, username: str) -> int:
- """Gets the follower count for a user."""
- from ..utils.validation import validate_twitter_username
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
- assert self.twitter_client is not None
- user = self.twitter_client.get_user(username)
- return user.public_metrics.followers_count
-
- def get_following_count(self, username: str) -> int:
- """Gets the following count for a user."""
- from ..utils.validation import validate_twitter_username
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
- assert self.twitter_client is not None
- user = self.twitter_client.get_user(username)
- return user.public_metrics.following_count
-
- def get_tweet_engagement(self, tweet_id: str) -> dict:
- """Gets the engagement for a tweet."""
- # This functionality is not yet migrated to the new TwitterClient
- raise NotImplementedError
-
- def evaluate_account(self, username: str) -> EvaluationResult:
- """Evaluates a Twitter account and returns a score."""
- from ..utils.validation import validate_twitter_username
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
- assert self.twitter_client is not None
- assert self.account_evaluator is not None
- user = self.twitter_client.get_user(username)
- return self.account_evaluator.evaluate(user)
-
- def evaluate_crypto_influencer(self, username: str) -> dict:
- """Evaluates a Twitter account as a crypto influencer."""
- from .crypto_influencer_evaluator import CryptoInfluencerEvaluator
- from ..utils.validation import validate_twitter_username
-
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
-
- assert self.twitter_client is not None
- evaluator = CryptoInfluencerEvaluator(self.twitter_client)
- user = self.twitter_client.get_user(username)
- result = evaluator.evaluate(user)
-
- return {"username": username, "score": result.score, "evaluation_data": result.additional_data}
-
- def generate_persona_prompt(self, username: str) -> str:
- """Generates a prompt to describe the voice and style of a specific twitter user."""
- from ..utils.validation import validate_twitter_username
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
- assert self.twitter_client is not None
- persona_skill = TwitterPersonaSkill(twitter_client=self.twitter_client)
- response = persona_skill.run(username=username)
- return response.report
-
- def _run(self, tool_name: str, **kwargs):
- if tool_name == "post_tweet":
- return self.post_tweet(**kwargs)
- elif tool_name == "get_all_replies":
- return self.get_all_replies(**kwargs)
- elif tool_name == "reply_to_tweet":
- return self.reply_to_tweet(**kwargs)
- elif tool_name == "get_follower_count":
- return self.get_follower_count(**kwargs)
- elif tool_name == "get_following_count":
- return self.get_following_count(**kwargs)
- elif tool_name == "get_tweet_engagement":
- return self.get_tweet_engagement(**kwargs)
- elif tool_name == "evaluate_account":
- return self.evaluate_account(**kwargs)
- elif tool_name == "evaluate_crypto_influencer":
- return self.evaluate_crypto_influencer(**kwargs)
- elif tool_name == "generate_persona_prompt":
- return self.generate_persona_prompt(**kwargs)
- else:
- raise ValueError(f"Unknown tool: {tool_name}")
diff --git a/src/talos/tools/twitter_client.py b/src/talos/tools/twitter_client.py
deleted file mode 100644
index 344826f5..00000000
--- a/src/talos/tools/twitter_client.py
+++ /dev/null
@@ -1,286 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Any, Optional
-import logging
-
-import tweepy
-from pydantic import model_validator
-from pydantic_settings import BaseSettings
-from textblob import TextBlob
-
-from talos.models.twitter import TwitterUser, Tweet, ReferencedTweet
-
-logger = logging.getLogger(__name__)
-
-
-class PaginatedTwitterResponse:
- """
- Response object for paginated Twitter API calls.
-
- Aggregates data from multiple Twitter API responses into a single response-like object
- that maintains compatibility with the expected interface while providing pagination metadata.
- """
-
- def __init__(self, tweets: list[Any], users: list[Any], total_requests: int = 1):
- self.data = tweets
- self.includes = {"users": users}
- self.meta = {"result_count": len(tweets), "paginated": total_requests > 1, "total_requests": total_requests}
- self.errors: list[Any] = []
-
-
-class TwitterConfig(BaseSettings):
- TWITTER_BEARER_TOKEN: Optional[str] = None
-
- @model_validator(mode="after")
- def validate_bearer_token(self):
- if not self.TWITTER_BEARER_TOKEN:
- raise ValueError("TWITTER_BEARER_TOKEN environment variable is required but not set")
-
- from talos.utils.validation import validate_api_token_format, mask_sensitive_data
- if not validate_api_token_format(self.TWITTER_BEARER_TOKEN, 'twitter'):
- logger.warning("Twitter bearer token format appears invalid")
-
- masked_token = mask_sensitive_data(self.TWITTER_BEARER_TOKEN)
- logger.info(f"Twitter client initialized with token: {masked_token}")
-
- return self
-
-
-class TwitterClient(ABC):
- @abstractmethod
- def get_user(self, username: str) -> TwitterUser:
- pass
-
- @abstractmethod
- def search_tweets(
- self, query: str, start_time: Optional[str] = None, max_tweets: int = 500
- ) -> PaginatedTwitterResponse:
- pass
-
- @abstractmethod
- def get_user_timeline(self, username: str) -> list[Tweet]:
- pass
-
- @abstractmethod
- def get_user_mentions(self, username: str) -> list[Tweet]:
- pass
-
- @abstractmethod
- def get_tweet(self, tweet_id: int) -> Tweet:
- pass
-
- @abstractmethod
- def get_sentiment(self, search_query: str = "talos") -> float:
- pass
-
- @abstractmethod
- def post_tweet(self, tweet: str) -> Any:
- pass
-
- @abstractmethod
- def reply_to_tweet(self, tweet_id: str, tweet: str) -> Any:
- pass
-
-
-class TweepyClient(TwitterClient):
- client: tweepy.Client
-
- def __init__(self):
- config = TwitterConfig()
- self.client = tweepy.Client(bearer_token=config.TWITTER_BEARER_TOKEN)
-
- def get_user(self, username: str) -> TwitterUser:
- from talos.utils.validation import validate_twitter_username
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
-
- response = self.client.get_user(
- username=username,
- user_fields=[
- "created_at",
- "public_metrics",
- "profile_image_url",
- "verified",
- "description",
- "location",
- "url",
- ],
- )
- from talos.models.twitter import TwitterPublicMetrics
- user_data = response.data
- return TwitterUser(
- id=int(user_data.id),
- username=user_data.username,
- name=user_data.name,
- created_at=user_data.created_at,
- profile_image_url=user_data.profile_image_url or "",
- public_metrics=TwitterPublicMetrics(**user_data.public_metrics),
- description=user_data.description,
- url=user_data.url,
- verified=getattr(user_data, 'verified', False)
- )
-
- def search_tweets(
- self, query: str, start_time: Optional[str] = None, max_tweets: int = 500
- ) -> PaginatedTwitterResponse:
- from talos.utils.validation import sanitize_user_input
- if not query or not query.strip():
- raise ValueError("Search query cannot be empty")
- query = sanitize_user_input(query, max_length=500)
- if max_tweets <= 0 or max_tweets > 1000:
- raise ValueError("max_tweets must be between 1 and 1000")
- all_tweets: list[Any] = []
- all_users: list[Any] = []
- next_token = None
- request_count = 0
-
- while len(all_tweets) < max_tweets:
- params = {
- "query": query,
- "tweet_fields": ["public_metrics", "created_at"],
- "expansions": ["author_id"],
- "user_fields": ["public_metrics"],
- "max_results": min(100, max_tweets - len(all_tweets)),
- }
- if start_time:
- params["start_time"] = start_time
- if next_token:
- params["next_token"] = next_token
-
- response = self.client.search_recent_tweets(**params)
- request_count += 1
-
- if not response or not response.data:
- break
-
- all_tweets.extend(response.data)
- if response.includes and response.includes.get("users"):
- all_users.extend(response.includes["users"])
-
- if hasattr(response, "meta") and response.meta and response.meta.get("next_token"):
- next_token = response.meta["next_token"]
- else:
- break
-
- return PaginatedTwitterResponse(all_tweets, all_users, request_count)
-
- def get_user_timeline(self, username: str) -> list[Tweet]:
- from talos.utils.validation import validate_twitter_username
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
-
- user = self.get_user(username)
- if not user:
- return []
- response = self.client.get_users_tweets(
- id=user.id,
- tweet_fields=["author_id", "in_reply_to_user_id", "public_metrics", "referenced_tweets", "conversation_id", "created_at", "edit_history_tweet_ids"],
- user_fields=[
- "created_at",
- "public_metrics",
- "profile_image_url",
- "verified",
- "description",
- "location",
- "url",
- ],
- )
- return [self._convert_to_tweet_model(tweet) for tweet in (response.data or [])]
-
- def get_user_mentions(self, username: str) -> list[Tweet]:
- from talos.utils.validation import validate_twitter_username
- if not validate_twitter_username(username):
- raise ValueError(f"Invalid Twitter username: {username}")
-
- user = self.get_user(username)
- if not user:
- return []
- response = self.client.get_users_mentions(
- id=user.id,
- tweet_fields=["author_id", "in_reply_to_user_id", "public_metrics", "referenced_tweets", "conversation_id", "created_at", "edit_history_tweet_ids"],
- user_fields=[
- "created_at",
- "public_metrics",
- "profile_image_url",
- "verified",
- "description",
- "location",
- "url",
- ],
- )
- return [self._convert_to_tweet_model(tweet) for tweet in (response.data or [])]
-
- def get_tweet(self, tweet_id: int) -> Tweet:
- if not isinstance(tweet_id, int) or tweet_id <= 0:
- raise ValueError(f"Invalid tweet ID: {tweet_id}")
-
- response = self.client.get_tweet(
- str(tweet_id),
- tweet_fields=["author_id", "in_reply_to_user_id", "public_metrics", "referenced_tweets", "conversation_id", "created_at", "edit_history_tweet_ids"]
- )
- return self._convert_to_tweet_model(response.data)
-
- def get_sentiment(self, search_query: str = "talos") -> float:
- """
- Gets the sentiment of tweets that match a search query.
- """
- from talos.utils.validation import sanitize_user_input
- if not search_query or not search_query.strip():
- raise ValueError("Search query cannot be empty")
- search_query = sanitize_user_input(search_query, max_length=500)
-
- response = self.search_tweets(search_query)
- sentiment = 0
- if response.data:
- for tweet in response.data:
- analysis = TextBlob(tweet.text)
- sentiment += analysis.sentiment.polarity
- return sentiment / len(response.data)
- return 0
-
- def post_tweet(self, tweet: str) -> Any:
- from talos.utils.validation import sanitize_user_input
- if not tweet or not tweet.strip():
- raise ValueError("Tweet content cannot be empty")
- if len(tweet) > 280:
- raise ValueError("Tweet content exceeds 280 characters")
- tweet = sanitize_user_input(tweet, max_length=280)
- return self.client.create_tweet(text=tweet)
-
- def reply_to_tweet(self, tweet_id: str, tweet: str) -> Any:
- from talos.utils.validation import sanitize_user_input
- if not tweet_id or not tweet_id.strip():
- raise ValueError("Tweet ID cannot be empty")
- if not tweet or not tweet.strip():
- raise ValueError("Tweet content cannot be empty")
- if len(tweet) > 280:
- raise ValueError("Tweet content exceeds 280 characters")
- tweet = sanitize_user_input(tweet, max_length=280)
- return self.client.create_tweet(text=tweet, in_reply_to_tweet_id=tweet_id)
-
- def _convert_to_tweet_model(self, tweet_data: Any) -> Tweet:
- """Convert raw tweepy tweet data to Tweet BaseModel"""
- referenced_tweets = []
- if hasattr(tweet_data, 'referenced_tweets') and tweet_data.referenced_tweets:
- for ref in tweet_data.referenced_tweets:
- if isinstance(ref, dict):
- referenced_tweets.append(ReferencedTweet(
- type=ref.get('type', ''),
- id=ref.get('id', 0)
- ))
- else:
- referenced_tweets.append(ReferencedTweet(
- type=getattr(ref, 'type', ''),
- id=getattr(ref, 'id', 0)
- ))
-
- return Tweet(
- id=int(tweet_data.id),
- text=tweet_data.text,
- author_id=str(tweet_data.author_id),
- created_at=str(tweet_data.created_at) if hasattr(tweet_data, 'created_at') and tweet_data.created_at else None,
- conversation_id=str(tweet_data.conversation_id) if hasattr(tweet_data, 'conversation_id') and tweet_data.conversation_id else None,
- public_metrics=dict(tweet_data.public_metrics) if hasattr(tweet_data, 'public_metrics') and tweet_data.public_metrics else {},
- referenced_tweets=referenced_tweets if referenced_tweets else None,
- in_reply_to_user_id=str(tweet_data.in_reply_to_user_id) if hasattr(tweet_data, 'in_reply_to_user_id') and tweet_data.in_reply_to_user_id else None,
- edit_history_tweet_ids=[str(id) for id in tweet_data.edit_history_tweet_ids] if hasattr(tweet_data, 'edit_history_tweet_ids') and tweet_data.edit_history_tweet_ids else None
- )
diff --git a/src/talos/tools/twitter_evaluator.py b/src/talos/tools/twitter_evaluator.py
deleted file mode 100644
index 7b43a3f4..00000000
--- a/src/talos/tools/twitter_evaluator.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from abc import ABC, abstractmethod
-from datetime import datetime, timezone
-
-from talos.models.twitter import TwitterUser
-
-from ..models.evaluation import EvaluationResult
-
-
-class TwitterAccountEvaluator(ABC):
- @abstractmethod
- def evaluate(self, user: TwitterUser) -> EvaluationResult:
- pass
-
-
-class DefaultTwitterAccountEvaluator(TwitterAccountEvaluator):
- def evaluate(self, user: TwitterUser) -> EvaluationResult:
- # Follower/Following Ratio
- followers_count = user.public_metrics.followers_count
- following_count = user.public_metrics.following_count
- if following_count > 0:
- follower_following_ratio = followers_count / following_count
- else:
- follower_following_ratio = followers_count
-
- # Account Age
- account_age_days = (datetime.now(timezone.utc) - user.created_at).days
-
- # Verified Status
- is_verified = getattr(user, "verified", False)
-
- # Profile Image
- has_custom_profile_image = bool(user.profile_image_url)
-
- # Calculate score
- score = 0
- if follower_following_ratio > 1:
- score += 25
- if account_age_days > 365:
- score += 25
- if is_verified:
- score += 25
- if has_custom_profile_image:
- score += 25
-
- return EvaluationResult(
- score=score,
- additional_data={
- "follower_following_ratio": follower_following_ratio,
- "account_age_days": account_age_days,
- "is_verified": is_verified,
- "has_custom_profile_image": has_custom_profile_image,
- },
- )
diff --git a/src/talos/tools/web_search.py b/src/talos/tools/web_search.py
deleted file mode 100644
index f9dd3efa..00000000
--- a/src/talos/tools/web_search.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from duckduckgo_search import DDGS
-
-from talos.tools.base import BaseTool
-
-
-class WebSearchTool(BaseTool):
- def __init__(self):
- self.ddgs = DDGS()
- super().__init__(
- name="web_search",
- description="A tool for searching the web.",
- )
-
- def _run(self, query: str) -> str:
- return self.ddgs.text(query, max_results=5)
diff --git a/src/talos/utils/__init__.py b/src/talos/utils/__init__.py
deleted file mode 100644
index 553b83bf..00000000
--- a/src/talos/utils/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .rofl_client import RoflClient
-
-__all__ = ["RoflClient"]
diff --git a/src/talos/utils/arbiscan.py b/src/talos/utils/arbiscan.py
deleted file mode 100644
index 45d592e9..00000000
--- a/src/talos/utils/arbiscan.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import os
-import json
-from typing import Optional, Dict, Any
-
-from talos.models.arbiscan import ContractSourceCode, ContractABI, ArbiScanResponse, ArbiScanABIResponse
-from talos.utils.http_client import SecureHTTPClient
-
-
-class ArbiScanClient:
- """Client for interacting with Arbiscan API to get contract source code and ABI"""
-
- def __init__(self, api_key: Optional[str] = None, chain_id: int = 42161):
- """
- Initialize ArbiScan client
-
- Args:
- api_key: Optional API key for higher rate limits
- chain_id: Chain ID for the network (42161 for Arbitrum One, 42170 for Nova, 421614 for Sepolia)
- """
- self.api_key = api_key
- self.chain_id = chain_id
- self.base_url = "https://api.etherscan.io/v2/api"
-
- def _make_request(self, params: Dict[str, Any]) -> Dict[str, Any]:
- """Make a request to the Etherscan API"""
- params["chainid"] = self.chain_id
- if self.api_key:
- params["apikey"] = self.api_key
-
- http_client = SecureHTTPClient()
- response = http_client.get(self.base_url, params=params)
- return response.json()
-
- def get_contract_source_code(self, contract_address: str) -> ContractSourceCode:
- """
- Get the source code of a verified contract
-
- Args:
- contract_address: The contract address to get source code for
-
- Returns:
- ContractSourceCode object with the contract details
-
- Raises:
- ValueError: If contract is not verified or not found
- requests.RequestException: If API request fails
- """
- from talos.utils.validation import sanitize_user_input
- contract_address = sanitize_user_input(contract_address, max_length=100)
-
- params = {
- "module": "contract",
- "action": "getsourcecode",
- "address": contract_address
- }
-
- data = self._make_request(params)
- response = ArbiScanResponse.model_validate(data)
-
- if response.status != "1":
- raise ValueError(f"Failed to get contract source code: {response.message}")
-
- if isinstance(response.result, str):
- raise ValueError(f"API Error: {response.result}")
-
- if not response.result or not response.result[0].source_code:
- raise ValueError(f"Contract {contract_address} is not verified or does not exist")
-
- return response.result[0]
-
- def get_contract_abi(self, contract_address: str) -> ContractABI:
- """
- Get the ABI of a verified contract
-
- Args:
- contract_address: The contract address to get ABI for
-
- Returns:
- ContractABI object with parsed ABI
-
- Raises:
- ValueError: If contract is not verified or not found
- requests.RequestException: If API request fails
- """
- from talos.utils.validation import sanitize_user_input
- contract_address = sanitize_user_input(contract_address, max_length=100)
-
- params = {
- "module": "contract",
- "action": "getabi",
- "address": contract_address
- }
-
- data = self._make_request(params)
- response = ArbiScanABIResponse.model_validate(data)
-
- if response.status != "1":
- raise ValueError(f"Failed to get contract ABI: {response.message}")
-
- if "Missing/Invalid API Key" in response.result or "Invalid API Key" in response.result:
- raise ValueError(f"API Error: {response.result}")
-
- try:
- abi_data = json.loads(response.result)
- return ContractABI(abi=abi_data)
- except json.JSONDecodeError as e:
- raise ValueError(f"Invalid ABI format returned: {e}")
-
-
-def get_contract_source_code(contract_address: str, api_key: Optional[str] = None, chain_id: int = 42161) -> ContractSourceCode:
- """
- Get the source code of a verified contract
-
- Args:
- contract_address: The contract address to get source code for
- api_key: Optional API key for higher rate limits
- chain_id: Chain ID for the network (default: 42161 for Arbitrum One)
-
- Returns:
- ContractSourceCode object with the contract details
- """
- api_key = api_key or os.getenv("ARBISCAN_API_KEY")
- client = ArbiScanClient(api_key=api_key, chain_id=chain_id)
- return client.get_contract_source_code(contract_address)
-
-
-def get_contract_abi(contract_address: str, api_key: Optional[str] = None, chain_id: int = 42161) -> ContractABI:
- """
- Get the ABI of a verified contract
-
- Args:
- contract_address: The contract address to get ABI for
- api_key: Optional API key for higher rate limits
- chain_id: Chain ID for the network (default: 42161 for Arbitrum One)
-
- Returns:
- ContractABI object with parsed ABI
- """
- api_key = api_key or os.getenv("ARBISCAN_API_KEY")
- client = ArbiScanClient(api_key=api_key, chain_id=chain_id)
- return client.get_contract_abi(contract_address)
diff --git a/src/talos/utils/contract_deployment.py b/src/talos/utils/contract_deployment.py
deleted file mode 100644
index 1d61cab6..00000000
--- a/src/talos/utils/contract_deployment.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-from eth_account import Account
-from eth_typing import HexStr
-from web3 import Web3
-
-from talos.models.contract_deployment import ContractDeploymentRequest, ContractDeploymentResult
-
-
-def calculate_contract_signature(bytecode: str, salt: str) -> str:
- """Calculate contract signature from bytecode and salt."""
- from Crypto.Hash import keccak
-
- clean_bytecode = bytecode.replace("0x", "")
- clean_salt = salt.replace("0x", "")
-
- combined = clean_bytecode + clean_salt
- keccak_hash = keccak.new(digest_bits=256)
- keccak_hash.update(combined.encode())
- signature = keccak_hash.hexdigest()
- return f"0x{signature}"
-
-
-def get_web3_connection(chain_id: int) -> Web3:
- """Get Web3 connection for the specified chain."""
- rpc_urls = {
- 1: "https://eth.llamarpc.com",
- 42161: "https://arb1.arbitrum.io/rpc",
- 42170: "https://nova.arbitrum.io/rpc",
- 421614: "https://sepolia-rollup.arbitrum.io/rpc",
- 137: "https://polygon-rpc.com",
- 10: "https://mainnet.optimism.io",
- }
-
- if chain_id not in rpc_urls:
- raise ValueError(f"Unsupported chain ID: {chain_id}")
-
- w3 = Web3(Web3.HTTPProvider(rpc_urls[chain_id]))
- if not w3.is_connected():
- raise ConnectionError(f"Failed to connect to chain {chain_id}")
-
- return w3
-
-
-def deploy_contract(request: ContractDeploymentRequest, private_key: str) -> ContractDeploymentResult:
- """Deploy a contract using CREATE2."""
- w3 = get_web3_connection(request.chain_id)
- account = Account.from_key(private_key)
-
- signature = calculate_contract_signature(request.bytecode, request.salt)
-
- constructor_data = ""
- if request.constructor_args:
- try:
- constructor_data = w3.codec.encode(["uint256[]"], [request.constructor_args]).hex()
- except Exception:
- constructor_data = ""
-
- deployment_bytecode = request.bytecode + constructor_data
-
- gas_limit = request.gas_limit
- if not gas_limit:
- try:
- gas_limit = w3.eth.estimate_gas({"data": HexStr(deployment_bytecode), "from": account.address})
- except Exception:
- gas_limit = 3000000
-
- transaction = {
- "data": deployment_bytecode,
- "gas": gas_limit,
- "gasPrice": request.gas_price or w3.eth.gas_price,
- "nonce": w3.eth.get_transaction_count(account.address),
- "chainId": request.chain_id,
- }
-
- signed_txn = w3.eth.account.sign_transaction(transaction, private_key)
- tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
-
- receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
-
- return ContractDeploymentResult(
- contract_address=str(receipt["contractAddress"]),
- transaction_hash=tx_hash.hex(),
- contract_signature=signature,
- chain_id=request.chain_id,
- gas_used=receipt["gasUsed"],
- was_duplicate=False,
- )
diff --git a/src/talos/utils/dexscreener.py b/src/talos/utils/dexscreener.py
deleted file mode 100644
index a4b3f1f1..00000000
--- a/src/talos/utils/dexscreener.py
+++ /dev/null
@@ -1,23 +0,0 @@
-
-from talos.models.dexscreener import DexscreenerData
-from talos.utils.http_client import SecureHTTPClient
-
-
-class DexscreenerClient:
- def __init__(self, pair_address: str = "0xdaae914e4bae2aae4f536006c353117b90fb37e3"):
- from talos.utils.validation import sanitize_user_input
- self.pair_address = sanitize_user_input(pair_address, max_length=100)
-
- def get_talos_data(self) -> DexscreenerData:
- """Gets the OHLCV data for a token from dexscreener.com"""
- url = f"https://api.dexscreener.com/latest/dex/pairs/arbitrum/{self.pair_address}"
- http_client = SecureHTTPClient()
- response = http_client.get(url)
- data = response.json()
- return DexscreenerData.model_validate(data.get("pair", {}))
-
-
-def get_ohlcv_data(pair_address: str) -> DexscreenerData:
- """Gets the OHLCV data for a token from dexscreener.com"""
- client = DexscreenerClient(pair_address)
- return client.get_talos_data()
diff --git a/src/talos/utils/geckoterminal.py b/src/talos/utils/geckoterminal.py
deleted file mode 100644
index 37bea49d..00000000
--- a/src/talos/utils/geckoterminal.py
+++ /dev/null
@@ -1,34 +0,0 @@
-
-from talos.models.gecko_terminal import OHLCV, GeckoTerminalOHLCVData
-from talos.utils.http_client import SecureHTTPClient
-
-
-class GeckoTerminalClient:
- def __init__(self, network: str = "arbitrum", pool_address: str = "0xdaAe914e4Bae2AAe4f536006C353117B90Fb37e3"):
- from talos.utils.validation import sanitize_user_input
- self.network = sanitize_user_input(network, max_length=50)
- self.pool_address = sanitize_user_input(pool_address, max_length=100)
-
- def get_ohlcv_data(self, timeframe: str = "hour") -> GeckoTerminalOHLCVData:
- """Gets the OHLCV data for a token from geckoterminal.com"""
- from talos.utils.validation import sanitize_user_input
- timeframe = sanitize_user_input(timeframe, max_length=20)
-
- url = (
- f"https://api.geckoterminal.com/api/v2/networks/{self.network}/pools/{self.pool_address}/ohlcv/{timeframe}"
- )
- http_client = SecureHTTPClient()
- response = http_client.get(url, headers={"accept": "application/json"})
- data = response.json()
- ohlcv_list = [
- OHLCV(
- timestamp=item[0],
- open=item[1],
- high=item[2],
- low=item[3],
- close=item[4],
- volume=item[5],
- )
- for item in data["data"]["attributes"]["ohlcv_list"]
- ]
- return GeckoTerminalOHLCVData(ohlcv_list=ohlcv_list)
diff --git a/src/talos/utils/http_client.py b/src/talos/utils/http_client.py
deleted file mode 100644
index dddb90bf..00000000
--- a/src/talos/utils/http_client.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import requests
-import logging
-from typing import Dict, Any, Optional
-from requests.adapters import HTTPAdapter # type: ignore
-from urllib3.util.retry import Retry
-
-logger = logging.getLogger(__name__)
-
-class SecureHTTPClient:
- """Secure HTTP client with proper timeouts, retries, and error handling."""
-
- def __init__(self, timeout: int = 30, max_retries: int = 3):
- self.timeout = timeout
- self.session = requests.Session()
-
- retry_strategy = Retry(
- total=max_retries,
- status_forcelist=[429, 500, 502, 503, 504],
- allowed_methods=["HEAD", "GET", "OPTIONS"],
- backoff_factor=1
- )
-
- adapter = HTTPAdapter(max_retries=retry_strategy)
- self.session.mount("http://", adapter)
- self.session.mount("https://", adapter)
-
- def get(self, url: str, headers: Optional[Dict[str, str]] = None, **kwargs) -> requests.Response:
- """Secure GET request with proper error handling."""
- try:
- response = self.session.get(
- url,
- headers=headers,
- timeout=self.timeout,
- verify=True,
- **kwargs
- )
- response.raise_for_status()
- return response
- except requests.exceptions.Timeout:
- logger.error(f"Request timeout for URL: {url}")
- raise
- except requests.exceptions.SSLError:
- logger.error(f"SSL verification failed for URL: {url}")
- raise
- except requests.exceptions.RequestException as e:
- logger.error(f"HTTP request failed for URL: {url}, Error: {e}")
- raise
-
- def post(self, url: str, data: Any = None, json: Any = None, headers: Optional[Dict[str, str]] = None, **kwargs) -> requests.Response:
- """Secure POST request with proper error handling."""
- try:
- response = self.session.post(
- url,
- data=data,
- json=json,
- headers=headers,
- timeout=self.timeout,
- verify=True,
- **kwargs
- )
- response.raise_for_status()
- return response
- except requests.exceptions.RequestException as e:
- logger.error(f"HTTP POST request failed for URL: {url}, Error: {e}")
- raise
diff --git a/src/talos/utils/llm.py b/src/talos/utils/llm.py
deleted file mode 100644
index 5626f4d5..00000000
--- a/src/talos/utils/llm.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from openai import OpenAI
-from openai.types.chat import ChatCompletionToolParam
-
-from talos.tools.web_search import WebSearchTool
-
-
-class LLMClient:
- def __init__(self, api_key: str):
- self.client = OpenAI(api_key=api_key)
- self.web_search_tool = WebSearchTool()
-
- def reasoning(self, prompt: str, model: str = "o4-mini", web_search: bool = False) -> str:
- tools: list[ChatCompletionToolParam] = []
- if web_search:
- tools.append(
- {"type": "web_search_preview"} # type: ignore
- )
- response = self.client.chat.completions.create(
- model=model,
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": prompt},
- ],
- tools=tools,
- )
- content = response.choices[0].message.content
- if content is None:
- raise ValueError("The response from the LLM was empty.")
- return content
-
- def deep_research(self, query: str, model: str = "o3-deep-research") -> str:
- search_results = self.web_search_tool.run(query)
- prompt = f"Based on the following search results, please answer the query: {query}\n\nSearch results:\n{search_results}"
- return self.reasoning(prompt, model)
diff --git a/src/talos/utils/memory_combiner.py b/src/talos/utils/memory_combiner.py
deleted file mode 100644
index d49da6f7..00000000
--- a/src/talos/utils/memory_combiner.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""LLM-based memory combining utility for intelligent memory fusion."""
-
-import os
-from typing import Union
-
-from langchain_core.prompts import PromptTemplate
-from langchain_openai import ChatOpenAI
-from pydantic import SecretStr
-
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-
-
-class MemoryCombiner:
- """Utility for combining similar memories using LLM-based intelligent fusion."""
-
- def __init__(self, model: str = "gpt-4o-mini", verbose: Union[bool, int] = False):
- api_key = os.getenv("OPENAI_API_KEY")
- self.llm = ChatOpenAI(model=model, api_key=SecretStr(api_key) if api_key else None)
- self.prompt_manager = FilePromptManager("src/talos/prompts")
- self.verbose = verbose
-
- def _get_verbose_level(self) -> int:
- """Convert verbose to integer level for backward compatibility."""
- if isinstance(self.verbose, bool):
- return 1 if self.verbose else 0
- return max(0, min(2, self.verbose))
-
- def combine_memories(self, existing_memory: str, new_memory: str) -> str:
- """
- Combine two similar memories into a single coherent memory using LLM.
-
- Args:
- existing_memory: The existing memory description
- new_memory: The new memory description to combine
-
- Returns:
- Combined memory description as a single coherent sentence
- """
- try:
- prompt = self.prompt_manager.get_prompt("memory_combiner_prompt")
- if not prompt:
- if self._get_verbose_level() >= 1:
- print("\033[33m⚠️ Memory combiner prompt not found, falling back to concatenation\033[0m")
- return f"{existing_memory}; {new_memory}"
-
- prompt_template = PromptTemplate(
- template=prompt.template,
- input_variables=prompt.input_variables,
- )
-
- chain = prompt_template | self.llm
- response = chain.invoke({
- "existing_memory": existing_memory,
- "new_memory": new_memory
- })
-
- combined = str(response.content).strip() if response.content else ""
- if self._get_verbose_level() >= 1:
- print(f"\033[36m🤖 LLM combined memories: {combined}\033[0m")
-
- return combined if combined else f"{existing_memory}; {new_memory}"
-
- except Exception as e:
- if self._get_verbose_level() >= 1:
- print(f"\033[33m⚠️ LLM memory combining failed ({e}), falling back to concatenation\033[0m")
- return f"{existing_memory}; {new_memory}"
diff --git a/src/talos/utils/rofl_client.py b/src/talos/utils/rofl_client.py
deleted file mode 100644
index a4ce9658..00000000
--- a/src/talos/utils/rofl_client.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import json
-import logging
-from typing import Any
-
-import httpx
-from eth_rpc import PrivateKeyWallet
-from eth_rpc.networks import Arbitrum
-from eth_typing import HexStr
-
-# Configure logging
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
-logger = logging.getLogger(__name__)
-
-
-class RoflClient:
- """Utility for interacting with ROFL runtime services.
-
- Provides methods for key fetching and transaction submission
- through the ROFL application daemon.
- """
-
- ROFL_SOCKET_PATH: str = "/run/rofl-appd.sock"
-
- def __init__(self, url: str = "") -> None:
- """Initialize ROFL utility.
-
- Args:
- url: Optional URL for HTTP transport (defaults to socket)
- """
- self.url: str = url
-
- async def _appd_post(self, path: str, payload: Any) -> Any:
- """Post request to ROFL application daemon.
-
- Args:
- path: API endpoint path
- payload: JSON payload to send
-
- Returns:
- JSON response from the daemon
-
- Raises:
- httpx.HTTPStatusError: If the request fails
- PermissionError: If unable to access the ROFL socket
- """
- import os
-
- transport: httpx.AsyncHTTPTransport | None = None
- socket_path = self.url if self.url and not self.url.startswith("http") else self.ROFL_SOCKET_PATH
-
- # Check if socket exists and is accessible
- if not os.path.exists(socket_path):
- raise PermissionError(
- f"ROFL socket not found at {socket_path}. Ensure ROFL daemon is running and socket is mounted."
- )
-
- if not os.access(socket_path, os.R_OK | os.W_OK):
- raise PermissionError(
- f"Permission denied accessing ROFL socket at {socket_path}. Check socket permissions."
- )
-
- if self.url and not self.url.startswith("http"):
- transport = httpx.AsyncHTTPTransport(uds=self.url)
- logger.debug(f"Using HTTP socket: {self.url}")
- elif not self.url:
- transport = httpx.AsyncHTTPTransport(uds=self.ROFL_SOCKET_PATH)
- logger.debug(f"Using unix domain socket: {self.ROFL_SOCKET_PATH}")
-
- async with httpx.AsyncClient(transport=transport) as client:
- base_url: str = self.url if self.url and self.url.startswith("http") else "http://localhost"
- full_url: str = base_url + path
- logger.debug(f"Posting to {full_url}: {json.dumps(payload)}")
- response: httpx.Response = await client.post(full_url, json=payload, timeout=60.0)
- response.raise_for_status()
- return response.json()
-
- async def generate_key(self, key_id: str) -> HexStr:
- """Fetch or generate a cryptographic key from ROFL.
-
- Args:
- key_id: Identifier for the key
-
- Returns:
- The private key as a hex string
-
- Raises:
- httpx.HTTPStatusError: If key fetch fails
- """
- payload: dict[str, str] = {"key_id": key_id, "kind": "secp256k1"}
-
- path: str = "/rofl/v1/keys/generate"
- response: dict[str, Any] = await self._appd_post(path, payload)
- return HexStr(response["key"])
-
- async def get_wallet(self, wallet_id: str) -> PrivateKeyWallet:
- """Get a wallet from ROFL.
-
- Args:
- wallet_id: Identifier for the wallet
-
- Returns:
- The wallet as a PrivateKeyWallet
- """
- key = await self.generate_key(wallet_id)
- return PrivateKeyWallet[Arbitrum](private_key=key) # type: ignore
diff --git a/src/talos/utils/validation.py b/src/talos/utils/validation.py
deleted file mode 100644
index b10985d6..00000000
--- a/src/talos/utils/validation.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import re
-import logging
-
-logger = logging.getLogger(__name__)
-
-def validate_github_username(username: str) -> bool:
- """Validate GitHub username format."""
- if not username or len(username) > 39:
- return False
- return re.match(r'^[a-zA-Z0-9]([a-zA-Z0-9_-]*[a-zA-Z0-9])?$', username) is not None
-
-def validate_github_repo_name(repo_name: str) -> bool:
- """Validate GitHub repository name format."""
- if not repo_name or len(repo_name) > 100:
- return False
- return re.match(r'^[a-zA-Z0-9._-]+$', repo_name) is not None
-
-def validate_twitter_username(username: str) -> bool:
- """Validate Twitter username format."""
- if not username or len(username) > 15:
- return False
- return re.match(r'^[a-zA-Z0-9_]+$', username) is not None
-
-def sanitize_user_input(input_str: str, max_length: int = 1000) -> str:
- """Sanitize user input by removing potentially dangerous characters."""
- if not input_str:
- return ""
- sanitized = re.sub(r'[\x00-\x1f\x7f-\x9f]', '', input_str)
- return sanitized[:max_length]
-
-def validate_api_token_format(token: str, token_type: str) -> bool:
- """Validate API token format based on type."""
- if not token:
- return False
-
- patterns = {
- 'github': r'^(ghp_|gho_|ghu_|ghs_|ghr_)[a-zA-Z0-9]{36}$|^github_pat_[a-zA-Z0-9]{82}$',
- 'openai': r'^sk-[a-zA-Z0-9]{48}$',
- 'twitter': r'^[a-zA-Z0-9]{25}$'
- }
-
- pattern = patterns.get(token_type.lower())
- if pattern:
- return re.match(pattern, token) is not None
- return len(token) >= 10
-
-def mask_sensitive_data(data: str, mask_char: str = '*') -> str:
- """Mask sensitive data for logging."""
- if len(data) <= 8:
- return mask_char * len(data)
- return data[:4] + mask_char * (len(data) - 8) + data[-4:]
diff --git a/src/thread_sentiment/__init__.py b/src/thread_sentiment/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/thread_sentiment/main.py b/src/thread_sentiment/main.py
deleted file mode 100644
index be863d90..00000000
--- a/src/thread_sentiment/main.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import tweepy
-from langchain_core.messages import AIMessage
-from langchain_openai import ChatOpenAI
-
-from talos.core.agent import Agent
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.settings import TwitterOAuthSettings
-
-from .twitter import get_all_replies
-
-prompt_manager = FilePromptManager("src/talos/prompts")
-
-
-def post_question():
- """
- Posts a tweet to Twitter asking for crypto market sentiment.
- The tweet ID is saved to a file to be used by the analysis function.
- """
- settings = TwitterOAuthSettings()
-
- auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
- auth.set_access_token(settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET)
- api = tweepy.API(auth)
-
- tweet = api.update_status("What is your current sentiment about crypto markets today and why?")
-
- with open("tweet_id.txt", "w") as f:
- f.write(str(tweet.id))
-
-
-def analyze_sentiment(tweets: list[dict]) -> str:
- """
- Analyzes the sentiment of a list of tweets and returns a summary.
- """
- agent = Agent(model=ChatOpenAI(model="gpt-4"), prompt_manager=prompt_manager, schema=None)
- response = agent.run(message="", history=[], tweets=str(tweets))
- if isinstance(response, AIMessage):
- return str(response.content)
- return str(response)
-
-
-def analyze_and_post_sentiment():
- """
- Analyzes the replies to the tweet posted by post_question() and posts a sentiment analysis summary.
- """
- settings = TwitterOAuthSettings()
-
- auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
- auth.set_access_token(settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET)
- api = tweepy.API(auth)
-
- with open("tweet_id.txt", "r") as f:
- tweet_id = f.read()
-
- tweets = get_all_replies(api, tweet_id)
- sentiment = analyze_sentiment(tweets)
-
- api.update_status(sentiment)
diff --git a/src/thread_sentiment/twitter.py b/src/thread_sentiment/twitter.py
deleted file mode 100644
index 6a486d25..00000000
--- a/src/thread_sentiment/twitter.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import tweepy
-
-
-def get_all_replies(api: tweepy.API, tweet_id: str) -> list[dict]:
- """
- Gets all replies to a tweet and returns a list of dictionaries, where each dictionary contains the tweet text and the follower count of the author.
- """
- # This is a simplified implementation. A real implementation would need to handle pagination.
- replies = tweepy.Cursor(api.search_tweets, q=f"conversation_id:{tweet_id}", tweet_mode="extended").items()
- return [{"text": reply.full_text, "followers": reply.user.followers_count} for reply in replies]
diff --git a/startup_tasks/18ebceaf6799.py b/startup_tasks/18ebceaf6799.py
deleted file mode 100644
index f0f6e439..00000000
--- a/startup_tasks/18ebceaf6799.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Startup task: Health Check (Recurring)
-Generated on: 2025-08-04T04:09:38.972016
-Hash: 18ebceaf6799
-"""
-
-from talos.core.startup_task import StartupTask
-import logging
-from datetime import datetime
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-
-class HealthCheckTask(StartupTask):
- """Recurring health check startup task."""
-
- async def run(self, **kwargs: Any) -> str:
- """Perform health check."""
- logger.info("Running startup health check")
-
- health_status = {
- "timestamp": datetime.now().isoformat(),
- "startup_system": "healthy",
- "task_manager": "operational"
- }
-
- logger.info(f"Startup health check completed: {health_status}")
- return f"Startup health check completed: {health_status['startup_system']}"
-
-
-def create_task() -> HealthCheckTask:
- """Create and return the startup task instance."""
- return HealthCheckTask(
- name="startup_health_check",
- description="Recurring health check for daemon startup components",
- task_hash="18ebceaf6799",
- enabled=True,
- cron_expression="*/5 * * * *"
- )
diff --git a/startup_tasks/__init__.py b/startup_tasks/__init__.py
deleted file mode 100644
index bb1c6051..00000000
--- a/startup_tasks/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Startup tasks directory - individual task files with hash-based names."""
diff --git a/startup_tasks/completed_tasks.json b/startup_tasks/completed_tasks.json
deleted file mode 100644
index 8221823f..00000000
--- a/startup_tasks/completed_tasks.json
+++ /dev/null
@@ -1,16 +0,0 @@
-[
- {
- "task_hash": "ec68f0115789",
- "name": "system_initialization",
- "executed_at": "2025-08-04T04:15:10.289582",
- "status": "completed",
- "error": null
- },
- {
- "task_hash": "eccaf09839f2",
- "name": "deploy_talos_governance",
- "executed_at": "2025-08-04T04:15:10.289965",
- "status": "completed",
- "error": null
- }
-]
\ No newline at end of file
diff --git a/startup_tasks/ec68f0115789.py b/startup_tasks/ec68f0115789.py
deleted file mode 100644
index 7639c4c2..00000000
--- a/startup_tasks/ec68f0115789.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Startup task: System Initialization
-Generated on: 2025-08-04T04:09:38.971991
-Hash: ec68f0115789
-"""
-
-from talos.core.startup_task import StartupTask
-import logging
-from datetime import datetime
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-
-class SystemInitTask(StartupTask):
- """System initialization startup task."""
-
- async def run(self, **kwargs: Any) -> str:
- """Initialize system components."""
- logger.info("Running system initialization task")
-
- initialization_steps = [
- "Verify environment variables",
- "Check database connectivity",
- "Initialize crypto keys",
- "Validate API endpoints"
- ]
-
- for step in initialization_steps:
- logger.info(f"Initialization step: {step}")
-
- completion_time = datetime.now()
- logger.info(f"System initialization completed at {completion_time}")
- return f"System initialization completed at {completion_time}"
-
-
-def create_task() -> SystemInitTask:
- """Create and return the startup task instance."""
- return SystemInitTask(
- name="system_initialization",
- description="Initialize system components and verify configuration",
- task_hash="ec68f0115789",
- enabled=True
- )
diff --git a/startup_tasks/eccaf09839f2.py b/startup_tasks/eccaf09839f2.py
deleted file mode 100644
index f81c7939..00000000
--- a/startup_tasks/eccaf09839f2.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Startup task: Contract Deployment
-Generated on: 2025-08-04T04:09:38.972009
-Hash: eccaf09839f2
-"""
-
-from talos.core.startup_task import StartupTask
-import logging
-from datetime import datetime
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-
-class ContractDeployTask(StartupTask):
- """Contract deployment startup task."""
-
- def __init__(self, **kwargs):
- super().__init__(**kwargs)
- self._contract_name = "TalosGovernance"
- self._private_key_env = "TALOS_DEPLOY_KEY"
-
- @property
- def contract_name(self) -> str:
- return self._contract_name
-
- @property
- def private_key_env(self) -> str:
- return self._private_key_env
-
- async def run(self, **kwargs: Any) -> str:
- """Deploy a smart contract."""
- logger.info(f"Deploying contract: {self.contract_name}")
-
- deployment_steps = [
- f"Load private key from {self.private_key_env}",
- f"Compile {self.contract_name} contract",
- "Deploy to network",
- "Verify deployment"
- ]
-
- for step in deployment_steps:
- logger.info(f"Contract deployment step: {step}")
-
- completion_time = datetime.now()
- logger.info(f"Contract {self.contract_name} deployed at {completion_time}")
- return f"Contract {self.contract_name} deployed successfully at {completion_time}"
-
-
-def create_task() -> ContractDeployTask:
- """Create and return the startup task instance."""
- return ContractDeployTask(
- name="deploy_talos_governance",
- description="Deploy TalosGovernance contract using private key from TALOS_DEPLOY_KEY",
- task_hash="eccaf09839f2",
- enabled=True
- )
diff --git a/taproot_reincarnation_kit/README.md b/taproot_reincarnation_kit/README.md
new file mode 100644
index 00000000..784158a7
--- /dev/null
+++ b/taproot_reincarnation_kit/README.md
@@ -0,0 +1,81 @@
+# Taproot Reincarnation Kit
+
+Esta é uma ferramenta de linha de comando (CLI) para demonstrar a criação de um endereço Bitcoin Taproot (P2TR) com uma condição de gasto baseada em um script leaf. Especificamente, ela cria um endereço que só pode ser gasto se a transação incluir uma assinatura válida e uma mensagem secreta específica.
+
+O conceito, "reencarnação on-chain", é uma forma poética de descrever como uma identidade (representada por uma chave privada) pode se manifestar na blockchain através da revelação de uma mensagem secreta.
+
+## Conceito
+
+O endereço Taproot gerado tem duas condições de gasto (paths):
+
+1. **Key Path (Caminho da Chave):** O gasto padrão, usando a chave de saída Taproot. Este script não foca neste caminho.
+2. **Script Path (Caminho do Script):** Um caminho de gasto alternativo que requer a revelação de um script. Neste caso, o script exige:
+ - Uma assinatura válida da chave interna (a "alma").
+ - A revelação da mensagem secreta (ex: "Eu reencarnei na blockchain").
+
+Este kit gera todos os componentes necessários para criar e (teoricamente) gastar de tal endereço.
+
+## Instalação
+
+### Pré-requisitos
+- Python 3.8 ou superior
+
+### Passos
+
+1. **Clone o repositório (ou tenha acesso a esta pasta):**
+ ```bash
+ # Exemplo, se o projeto estivesse em um repositório git
+ # git clone https://github.com/your-username/taproot-reincarnation-kit.git
+ # cd taproot-reincarnation-kit
+ ```
+
+2. **Instale o pacote em modo editável:**
+ ```bash
+ pip install -e .
+ ```
+ Isso instalará a dependência (`ecdsa`) e criará o comando `reincarnate-cli` no seu ambiente.
+
+## Uso
+
+Após a instalação, você pode usar a ferramenta `reincarnate-cli` no seu terminal.
+
+### Gerar um Novo Endereço com uma Nova Chave
+
+Simplesmente execute o comando sem argumentos para gerar uma nova chave secreta e o endereço correspondente com a mensagem padrão:
+```bash
+reincarnate-cli
+```
+
+### Usar uma Chave Secreta Específica
+
+Você pode fornecer sua própria chave secreta de 32 bytes em formato hexadecimal:
+```bash
+reincarnate-cli --secret-key sua_chave_secreta_hex
+```
+
+### Usar uma Mensagem Customizada
+
+Você pode especificar a mensagem secreta que será usada no script leaf:
+```bash
+reincarnate-cli --message "Minha mensagem secreta aqui"
+```
+
+### Combinar Opções
+
+```bash
+reincarnate-cli --secret-key sua_chave_secreta_hex --message "Outra mensagem"
+```
+
+## Saída do Script
+
+A ferramenta irá imprimir no console:
+- A chave secreta (se gerada) e a chave pública interna.
+- O hash da mensagem secreta.
+- O script leaf em hexadecimal.
+- O hash do TapLeaf.
+- O tweak Taproot.
+- A chave de saída Taproot.
+- O endereço final `bc1p...` (bech32m).
+- Um exemplo do *witness* necessário para gastar os fundos, que inclui a assinatura (de uma transação dummy), a mensagem, o script e o control block.
+
+**AVISO DE SEGURANÇA:** Este script é para fins educacionais. Não use as chaves geradas ou os endereços em um ambiente de produção com fundos reais sem um entendimento completo das implicações de segurança do Taproot e do Bitcoin.
\ No newline at end of file
diff --git a/taproot_reincarnation_kit/pyproject.toml b/taproot_reincarnation_kit/pyproject.toml
new file mode 100644
index 00000000..6f80fb1a
--- /dev/null
+++ b/taproot_reincarnation_kit/pyproject.toml
@@ -0,0 +1,24 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "taproot-reincarnation-kit"
+version = "1.0.0"
+authors = [
+ { name="Your Name", email="you@example.com" },
+]
+description = "Uma ferramenta para criar endereços Taproot com um script de 'reencarnação'."
+readme = "README.md"
+requires-python = ">=3.8"
+classifiers = [
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+]
+dependencies = [
+ "ecdsa>=0.18.0",
+]
+
+[project.scripts]
+reincarnate-cli = "reincarnation_kit.main:main"
\ No newline at end of file
diff --git a/taproot_reincarnation_kit/reincarnation_kit/main.py b/taproot_reincarnation_kit/reincarnation_kit/main.py
new file mode 100644
index 00000000..8080e5ec
--- /dev/null
+++ b/taproot_reincarnation_kit/reincarnation_kit/main.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+import hashlib
+import os
+import sys
+import argparse
+from ecdsa import SECP256k1, SigningKey, VerifyingKey, util
+
+# ---------- Parâmetros da curva secp256k1 ----------
+curve = SECP256k1
+G = curve.generator
+p = curve.curve.p()
+n = curve.order
+
+
+def int_to_bytes(i, length):
+    return i.to_bytes(length, 'big')
+
+
+def bytes_to_int(b):
+    return int.from_bytes(b, 'big')
+
+
+def varint_encode(i):
+    """Codifica um inteiro no formato varint do Bitcoin (little-endian)."""
+    if i < 0xfd:
+        return i.to_bytes(1, 'little')
+    elif i <= 0xffff:
+        return b'\xfd' + i.to_bytes(2, 'little')
+    elif i <= 0xffffffff:
+        return b'\xfe' + i.to_bytes(4, 'little')
+    else:
+        return b'\xff' + i.to_bytes(8, 'little')
+
+
+# ---------- Bech32m (BIP-350) — codificador mínimo ----------
+CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
+
+
+def bech32_polymod(values):
+    """Internal function that computes the Bech32 checksum."""
+    GEN = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
+    chk = 1
+    for v in values:
+        b = (chk >> 25)
+        chk = ((chk & 0x1ffffff) << 5) ^ v
+        for i in range(5):
+            chk ^= GEN[i] if ((b >> i) & 1) else 0
+    return chk
+
+
+def bech32_hrp_expand(hrp):
+    return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
+
+
+def bech32_create_checksum(hrp, data, spec='bech32m'):
+    values = bech32_hrp_expand(hrp) + data
+    const = 0x2bc830a3 if spec == 'bech32m' else 1
+    polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ const
+    return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
+
+
+def bech32_encode(hrp, data, spec='bech32m'):
+    combined = data + bech32_create_checksum(hrp, data, spec)
+    return hrp + '1' + ''.join([CHARSET[d] for d in combined])
+
+
+def convertbits(data, frombits, tobits, pad=True):
+    """General power-of-2 base conversion."""
+    acc = 0
+    bits = 0
+    ret = []
+    maxv = (1 << tobits) - 1
+    for value in data:
+        if value < 0 or (value >> frombits):
+            raise ValueError("Invalid data range")
+        acc = (acc << frombits) | value
+        bits += frombits
+        while bits >= tobits:
+            bits -= tobits
+            ret.append((acc >> bits) & maxv)
+    if pad:
+        if bits:
+            ret.append((acc << (tobits - bits)) & maxv)
+    elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
+        raise ValueError("Invalid padding")
+    return ret
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Gera um endereço Taproot com um script de 'reencarnação'.")
+    parser.add_argument(
+        "-s", "--secret-key",
+        type=str,
+        help="A chave secreta em hexadecimal. Se não for fornecida, uma nova será gerada."
+    )
+    parser.add_argument(
+        "-m", "--message",
+        type=str,
+        default="Eu reencarnei na blockchain",
+        help="A mensagem secreta a ser usada no script leaf."
+    )
+    args = parser.parse_args()
+
+    # ---------- 1. Chave interna (fornecida ou gerada) ----------
+    if args.secret_key:
+        sk_int_hex = args.secret_key
+        print("🔑 Usando chave secreta fornecida.")
+    else:
+        sk_int_bytes = os.urandom(32)
+        sk_int_hex = sk_int_bytes.hex()
+        print("🔑 Gerando nova chave secreta.")
+
+    print(" Secret Key (hex):", sk_int_hex)
+    sk_int = int(sk_int_hex, 16)
+
+    vk_int = SigningKey.from_secret_exponent(sk_int, curve=curve).verifying_key
+    prefix = b'\x02' if vk_int.pubkey.point.y() % 2 == 0 else b'\x03'
+    Q_int_bytes = prefix + int_to_bytes(vk_int.pubkey.point.x(), 32)
+    print(" Public Key (compressa):", Q_int_bytes.hex())
+
+    # ---------- 2. Mensagem e hash ----------
+    M = args.message.encode('utf-8')
+    hM = hashlib.sha256(M).digest()
+    print("\n🗒️ Mensagem:", args.message)
+    print(" Hash (SHA256):", hM.hex())
+
+    # ---------- 3. Script leaf ----------
+    # Estrutura: <push33 pubkey> OP_CHECKSIGVERIFY <push32 hash(M)> OP_EQUAL
+    OP_CHECKSIGVERIFY = b'\xad'  # 0xad (0xac é OP_CHECKSIG, sem VERIFY)
+    OP_EQUAL = b'\x87'           # 0x87
+    OP_PUSH33 = b'\x21'          # 0x21 (push next 33 bytes)
+    OP_PUSH32 = b'\x20'          # 0x20 (push next 32 bytes)
+
+    leaf = (
+        OP_PUSH33 + Q_int_bytes +  # 1 + 33 bytes (pubkey)
+        OP_CHECKSIGVERIFY +        # 1 byte
+        OP_PUSH32 + hM +           # 1 + 32 bytes
+        OP_EQUAL                   # 1 byte
+    )
+    print("\n📜 Script leaf (hex) :", leaf.hex())
+
+    # ---------- 4. Hash da leaf (TapLeaf) ----------
+    # TapLeaf = leaf_version || varint(len(leaf)) || leaf
+    leaf_hash = hashlib.sha256(
+        b'\xc0' +  # 0xc0 = leaf version (BIP-342)
+        varint_encode(len(leaf)) +
+        leaf
+    ).digest()
+    print("\n🔎 TapLeaf hash (SHA256) :", leaf_hash.hex())
+
+    # ---------- 5. Taproot tweak ----------
+    # t = SHA256( tapTweak || X || leaf_hash ) onde tapTweak = b'\x01'
+    tap_tweak_prefix = b'\x01'
+    tweak = hashlib.sha256(
+        tap_tweak_prefix +
+        Q_int_bytes +
+        leaf_hash
+    ).digest()
+    t_int = bytes_to_int(tweak) % n
+    print("\n🛠️ Tweak (int) :", t_int)
+
+    # ---------- 6. Chave de saída (taproot) ----------
+    # Q_out = Q_int + t·G (point addition)
+    Q_int_point = vk_int.pubkey.point
+    Q_out_point = Q_int_point + t_int * G
+
+    # Codifica ponto comprimido
+    Q_out_bytes = (b'\x02' if Q_out_point.y() % 2 == 0 else b'\x03') + int_to_bytes(Q_out_point.x(), 32)
+    print("\n🔐 Chave de saída taproot (compressa) :", Q_out_bytes.hex())
+
+    # ---------- 7. Endereço bech32m ----------
+    # Witness program: version 1 (0x01) + 32-byte x-coordinate
+    witver = 0x01
+    witprog = Q_out_bytes[1:]  # only x-coordinate (32 bytes)
+    bech32_data = [witver] + convertbits(witprog, 8, 5)
+    address = bech32_encode('bc', bech32_data, spec='bech32m')
+    print("\n🏁 Endereço Taproot (bech32m) :", address)
+
+    # ---------- 8. Exemplo de witness (gasto) ----------
+    # Para demonstração, assinatura da chave interna sobre um sighash dummy.
+    msg_hash = hashlib.sha256(b"dummy tx for sighash").digest()  # placeholder
+    sig_obj = SigningKey.from_secret_exponent(sk_int, curve=curve).sign_digest(
+        msg_hash,
+        sigencode=util.sigencode_der_canonize
+    ) + b'\x01'  # append SIGHASH_ALL
+
+    # Witness stack: assinatura, mensagem, script leaf, control block
+    leaf_version = b'\xc0'  # 0xc0 = leaf version (BIP-342)
+    control_block = leaf_version + Q_int_bytes  # sem merkle_path porque só há um leaf
+    witness = [
+        sig_obj.hex(),
+        M.hex(),
+        leaf.hex(),
+        control_block.hex()
+    ]
+    print("\n📦 Exemplo de witness (hex strings):")
+    for i, w in enumerate(witness, 1):
+        print(f" [{i}] {w}")
+
+    print("\n✅ Pronto! O endereço acima aceita o output Taproot. Quando quiser gastá‑lo, envie um transaction que contenha o witness acima.")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test_declarative_standalone.py b/test_declarative_standalone.py
deleted file mode 100644
index 42674cea..00000000
--- a/test_declarative_standalone.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env python3
-"""
-Standalone test for the new declarative prompt configuration system.
-This avoids circular import issues in the main test suite.
-"""
-
-import sys
-import os
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
-
-from talos.prompts.prompt_config import (
- PromptConfig,
- StaticPromptSelector,
- ConditionalPromptSelector
-)
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-import tempfile
-import json
-
-
-def test_static_prompt_selector():
- """Test static prompt selector (backward compatibility)."""
- print("Testing StaticPromptSelector...")
- selector = StaticPromptSelector(prompt_names=["test_prompt"])
- result = selector.select_prompts({})
- assert result == ["test_prompt"], f"Expected ['test_prompt'], got {result}"
- print("✓ StaticPromptSelector works correctly")
-
-
-def test_conditional_prompt_selector():
- """Test conditional prompt selector."""
- print("Testing ConditionalPromptSelector...")
- selector = ConditionalPromptSelector(
- conditions={"has_voice": "voice_prompt"},
- default_prompt="default_prompt"
- )
-
- result = selector.select_prompts({"has_voice": True})
- assert result == ["voice_prompt"], f"Expected ['voice_prompt'], got {result}"
-
- result = selector.select_prompts({})
- assert result == ["default_prompt"], f"Expected ['default_prompt'], got {result}"
- print("✓ ConditionalPromptSelector works correctly")
-
-
-def test_prompt_config():
- """Test PromptConfig functionality."""
- print("Testing PromptConfig...")
- config = PromptConfig(
- selector=StaticPromptSelector(prompt_names=["test_prompt"]),
- variables={"test_var": "test_value"}
- )
-
- result = config.get_prompt_names({})
- assert result == ["test_prompt"], f"Expected ['test_prompt'], got {result}"
- print("✓ PromptConfig works correctly")
-
-
-def test_file_prompt_manager_with_config():
- """Test FilePromptManager with declarative configuration."""
- print("Testing FilePromptManager with PromptConfig...")
-
- with tempfile.TemporaryDirectory() as temp_dir:
- prompt_file = os.path.join(temp_dir, "test_prompt.json")
- with open(prompt_file, 'w') as f:
- json.dump({
- "name": "test_prompt",
- "description": "Test prompt",
- "template": "Hello {name}, mode: {mode}!",
- "input_variables": ["name", "mode"]
- }, f)
-
- manager = FilePromptManager(prompts_dir=temp_dir)
-
- config = PromptConfig(
- selector=StaticPromptSelector(prompt_names=["test_prompt"]),
- variables={"name": "world", "mode": "test"},
- transformations={"mode": "uppercase"}
- )
-
- context = {}
- result = manager.get_prompt_with_config(config, context)
-
- assert result is not None, "Expected prompt result, got None"
- assert "Hello world, mode: TEST!" in result.template, f"Expected transformed template, got {result.template}"
- print("✓ FilePromptManager with PromptConfig works correctly")
-
-
-def test_variable_transformations():
- """Test variable transformations."""
- print("Testing variable transformations...")
-
- from talos.prompts.prompt_manager import PromptManager
-
- class DummyManager(PromptManager):
- def get_prompt(self, name): pass
- def get_prompt_with_config(self, config, context): pass
-
- manager = DummyManager()
-
- template = "Hello {name}, mode: {mode}!"
- variables = {"name": "world", "mode": "test"}
- transformations = {"mode": "uppercase"}
-
- result = manager.apply_variable_transformations(template, variables, transformations)
- expected = "Hello world, mode: TEST!"
- assert result == expected, f"Expected '{expected}', got '{result}'"
- print("✓ Variable transformations work correctly")
-
-
-def main():
- """Run all tests."""
- print("=== Testing Declarative Prompt Configuration System ===\n")
-
- try:
- test_static_prompt_selector()
- test_conditional_prompt_selector()
- test_prompt_config()
- test_file_prompt_manager_with_config()
- test_variable_transformations()
-
- print("\n🎉 All tests passed! The declarative prompt system is working correctly.")
- return 0
-
- except Exception as e:
- print(f"\n❌ Test failed: {e}")
- import traceback
- traceback.print_exc()
- return 1
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/disabled_test_agent.py b/tests/disabled_test_agent.py
deleted file mode 100644
index 0009b584..00000000
--- a/tests/disabled_test_agent.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import os
-from unittest.mock import patch
-
-from pydantic import BaseModel, Field
-
-os.environ["OPENAI_API_KEY"] = "test"
-from talos.agent import Agent
-
-
-class TestSchema(BaseModel):
- name: str = Field(description="The name of the person")
- age: int = Field(description="The age of the person")
-
-
-@patch("src.talos.agent.ChatOpenAI")
-class TestAgent:
- def test_agent(self, mock_chat_openai):
- # Configure the mock
- mock_instance = mock_chat_openai.return_value
- mock_instance.with_structured_output.return_value.invoke.return_value = TestSchema(name="John", age=30)
-
- # Create an instance of the Agent class
- agent = Agent(
- model="gpt-3.5-turbo",
- prompt_template="Generate a person with the name {name}.",
- schema=TestSchema,
- )
-
- # Run the agent
- response = agent.run(name="John")
-
- # Assert the response is as expected
- assert isinstance(response, TestSchema)
- assert response.name == "John"
- assert response.age == 30
diff --git a/tests/simple_supervisor.py b/tests/simple_supervisor.py
deleted file mode 100644
index 7ce53091..00000000
--- a/tests/simple_supervisor.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any
-
-from talos.hypervisor.supervisor import Supervisor
-
-if TYPE_CHECKING:
- from talos.core.agent import Agent
-
-
-class SimpleSupervisor(Supervisor):
- """
- A simple supervisor that approves every other tool call.
- """
-
- counter: int = 0
-
- def set_agent(self, agent: "Agent"):
- """
- Sets the agent to be supervised.
- """
- pass
-
- def approve(self, action: str, args: dict[str, Any]) -> tuple[bool, str | None]:
- """
- Approves or denies an action.
- """
- self.counter += 1
- if self.counter % 2 == 0:
- return True, None
- return False, "Denied by SimpleSupervisor"
diff --git a/tests/test_agent.py b/tests/test_agent.py
deleted file mode 100644
index 92d41998..00000000
--- a/tests/test_agent.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from unittest.mock import MagicMock
-
-import pytest
-from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AIMessage, HumanMessage
-
-from talos.core.agent import Agent
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_manager import PromptManager
-
-
-class MockPromptManager(PromptManager):
- def get_prompt(self, name: str) -> Prompt | None:
- return Prompt(name="default", template="test template", input_variables=[])
-
- def get_prompt_with_config(self, config, context):
- return Prompt(name="default", template="test template", input_variables=[])
-
-
-@pytest.fixture
-def prompt_manager():
- return MockPromptManager()
-
-
-class MockChatModel(BaseChatModel):
- def _generate(self, messages, stop=None, run_manager=None, **kwargs):
- return MagicMock()
-
- def _llm_type(self):
- return "mock"
-
-
-def test_reset_history(prompt_manager):
- agent = Agent(model=MockChatModel(), prompt_manager=prompt_manager)
- agent.add_to_history([HumanMessage(content="hello")])
- assert len(agent.history) == 1
- agent.reset_history()
- assert len(agent.history) == 0
-
-
-def test_add_to_history(prompt_manager):
- agent = Agent(model=MockChatModel(), prompt_manager=prompt_manager)
- messages = [
- HumanMessage(content="hello"),
- AIMessage(content="hi there"),
- ]
- agent.add_to_history(messages)
- assert len(agent.history) == 2
- assert agent.history[0].content == "hello"
- assert agent.history[1].content == "hi there"
diff --git a/tests/test_cryptography.py b/tests/test_cryptography.py
deleted file mode 100644
index 36bedf36..00000000
--- a/tests/test_cryptography.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import base64
-import os
-import unittest
-
-from talos.services.key_management import KeyManagement
-from talos.skills.cryptography import CryptographySkill
-
-
-class CryptographyTest(unittest.TestCase):
- def setUp(self):
- self.key_dir = ".test_keys"
- self.km = KeyManagement(key_dir=self.key_dir)
- self.crypto_skill = CryptographySkill(key_management=self.km)
-
- def tearDown(self):
- if os.path.exists(self.key_dir):
- for f in os.listdir(self.key_dir):
- os.remove(os.path.join(self.key_dir, f))
- os.rmdir(self.key_dir)
-
- def test_key_generation(self):
- self.km.generate_keys()
- self.assertTrue(os.path.exists(self.km.private_key_path))
- self.assertTrue(os.path.exists(self.km.public_key_path))
-
- def test_encryption_decryption(self):
- message = "This is a secret message."
- public_key = self.km.get_public_key()
-
- encrypted = self.crypto_skill.run(data=message, public_key=base64.b64encode(public_key).decode())
-
- decrypted = self.crypto_skill.run(data=encrypted, decrypt=True)
-
- self.assertEqual(message, decrypted)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/test_data.py b/tests/test_data.py
deleted file mode 100644
index 8c47de41..00000000
--- a/tests/test_data.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from talos.data.dataset_manager import DatasetManager
-
-
-@pytest.fixture
-def dataset_manager():
- with patch("openai.OpenAI"), patch("openai.AsyncOpenAI"):
- manager = DatasetManager()
- return manager
-
-
-def test_add_dataset(dataset_manager):
- with patch("talos.data.dataset_manager.FAISS") as mock_faiss:
- dataset_manager.add_dataset("test_dataset", ["doc1", "doc2"])
- assert "test_dataset" in dataset_manager.datasets
- mock_faiss.from_texts.assert_called_once()
-
-
-def test_add_duplicate_dataset(dataset_manager):
- with patch("talos.data.dataset_manager.FAISS"):
- dataset_manager.add_dataset("test_dataset", ["doc1", "doc2"])
- with pytest.raises(ValueError):
- dataset_manager.add_dataset("test_dataset", ["doc1", "doc2"])
-
-
-def test_remove_dataset(dataset_manager):
- with patch("talos.data.dataset_manager.FAISS"):
- dataset_manager.add_dataset("test_dataset", ["doc1", "doc2"])
- dataset_manager.remove_dataset("test_dataset")
- assert "test_dataset" not in dataset_manager.datasets
-
-
-def test_remove_nonexistent_dataset(dataset_manager):
- with pytest.raises(ValueError):
- dataset_manager.remove_dataset("nonexistent_dataset")
-
-
-def test_get_dataset(dataset_manager):
- with patch("talos.data.dataset_manager.FAISS"):
- dataset_manager.add_dataset("test_dataset", ["doc1", "doc2"])
- dataset = dataset_manager.get_dataset("test_dataset")
- assert dataset == ["doc1", "doc2"]
-
-
-def test_get_nonexistent_dataset(dataset_manager):
- with pytest.raises(ValueError):
- dataset_manager.get_dataset("nonexistent_dataset")
-
-
-def test_get_all_datasets(dataset_manager):
- with patch("talos.data.dataset_manager.FAISS"):
- dataset_manager.add_dataset("dataset1", ["doc1", "doc2"])
- dataset_manager.add_dataset("dataset2", ["doc3", "doc4"])
- datasets = dataset_manager.get_all_datasets()
- assert len(datasets) == 2
- assert "dataset1" in datasets
- assert "dataset2" in datasets
-
-
-def test_search(dataset_manager):
- with patch("talos.data.dataset_manager.FAISS") as mock_faiss:
- mock_vector_store = MagicMock()
- mock_vector_store.similarity_search.return_value = [MagicMock(page_content="doc1")]
- mock_faiss.from_texts.return_value = mock_vector_store
- dataset_manager.add_dataset("test_dataset", ["doc1", "doc2"])
- results = dataset_manager.search("fruit")
- assert isinstance(results, list)
- assert results == ["doc1"]
-
-
-def test_add_empty_dataset(dataset_manager):
- mock_embeddings = MagicMock()
- mock_embeddings.embed_documents.return_value = []
- dataset_manager.embeddings = mock_embeddings
- dataset_manager.add_dataset("empty_dataset", [])
- assert "empty_dataset" in dataset_manager.datasets
- assert dataset_manager.vector_store is None
-
-
-def test_search_with_empty_query(dataset_manager):
- with patch("talos.data.dataset_manager.FAISS"):
- dataset_manager.add_dataset("test_dataset", ["doc1", "doc2"])
- results = dataset_manager.search("")
- assert results == []
-
-
-def test_search_on_empty_dataset(dataset_manager):
- results = dataset_manager.search("query")
- assert results == []
diff --git a/tests/test_declarative_prompts.py b/tests/test_declarative_prompts.py
deleted file mode 100644
index d9202263..00000000
--- a/tests/test_declarative_prompts.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import pytest
-from unittest.mock import MagicMock, patch
-
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_config import (
- PromptConfig,
- StaticPromptSelector,
- ConditionalPromptSelector
-)
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-from talos.dag.nodes import PromptNode, GraphState
-
-PromptNode.model_rebuild()
-
-
-def test_static_prompt_selector():
- """Test static prompt selector (backward compatibility)."""
- selector = StaticPromptSelector(prompt_names=["test_prompt"])
- result = selector.select_prompts({})
- assert result == ["test_prompt"]
-
-
-def test_conditional_prompt_selector():
- """Test conditional prompt selector."""
- selector = ConditionalPromptSelector(
- conditions={"has_voice": "voice_prompt"},
- default_prompt="default_prompt"
- )
-
- result = selector.select_prompts({"has_voice": True})
- assert result == ["voice_prompt"]
-
- result = selector.select_prompts({})
- assert result == ["default_prompt"]
-
-
-def test_prompt_config_integration():
- """Test PromptConfig with PromptNode."""
- config = PromptConfig(
- selector=StaticPromptSelector(prompt_names=["test_prompt"]),
- variables={"test_var": "test_value"}
- )
-
- mock_manager = MagicMock(spec=FilePromptManager)
- mock_manager.get_prompt_with_config.return_value = Prompt(
- name="test", template="Test template", input_variables=[]
- )
-
- node = PromptNode(
- node_id="test_node",
- name="Test Node",
- prompt_manager=mock_manager,
- prompt_config=config
- )
-
- state: GraphState = {
- "messages": [],
- "context": {},
- "current_query": "test",
- "results": {},
- "metadata": {}
- }
-
- result = node.execute(state)
- assert "Applied prompt using declarative config" in result["results"]["test_node"]
-
-
-def test_prompt_node_validation():
- """Test that PromptNode requires either prompt_names or prompt_config."""
- mock_manager = MagicMock()
-
- with pytest.raises(ValueError, match="Either prompt_names or prompt_config must be provided"):
- PromptNode(
- node_id="test_node",
- name="Test Node",
- prompt_manager=mock_manager
- )
-
-
-def test_file_prompt_manager_with_config():
- """Test FilePromptManager with declarative config."""
- with patch("os.listdir", return_value=[]):
- manager = FilePromptManager(prompts_dir="dummy_dir")
-
- manager.prompts = {
- "test_prompt": Prompt(
- name="test_prompt",
- template="Hello {name}!",
- input_variables=["name"]
- )
- }
-
- config = PromptConfig(
- selector=StaticPromptSelector(prompt_names=["test_prompt"]),
- variables={"name": "world"}
- )
-
- result = manager.get_prompt_with_config(config, {})
- assert result is not None
- assert "Hello world!" in result.template
-
-
-def test_variable_transformations():
- """Test variable transformations in prompt manager."""
- with patch("os.listdir", return_value=[]):
- manager = FilePromptManager(prompts_dir="dummy_dir")
-
- template = "Mode: {mode}"
- variables = {"mode": "test"}
- transformations = {"mode": "uppercase"}
-
- result = manager.apply_variable_transformations(template, variables, transformations)
- assert result == "Mode: TEST"
diff --git a/tests/test_dexscreener.py b/tests/test_dexscreener.py
deleted file mode 100644
index 08e4a831..00000000
--- a/tests/test_dexscreener.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-from talos.tools.dexscreener import DexscreenerTool
-
-
-@patch("talos.utils.http_client.SecureHTTPClient.get")
-def test_dexscreener_tool(mock_get):
- mock_response = MagicMock()
- mock_response.status_code = 200
- mock_response.json.return_value = {
- "pair": {
- "priceUsd": "1.23",
- "priceChange": 1.59,
- "volume": 1000000
- }
- }
- mock_get.return_value = mock_response
- tool = DexscreenerTool()
- price_data = tool._run(token_address="0xdaae914e4bae2aae4f536006c353117b90fb37e3")
- assert price_data.price_usd == 1.23
- assert price_data.price_change_h24 == 1.59
- assert price_data.volume_h24 == 1000000
diff --git a/tests/test_document_loader.py b/tests/test_document_loader.py
deleted file mode 100644
index 0727517f..00000000
--- a/tests/test_document_loader.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import unittest
-from unittest.mock import Mock, patch
-
-from talos.data.dataset_manager import DatasetManager
-from talos.tools.document_loader import DocumentLoaderTool, DatasetSearchTool
-
-
-class TestDocumentLoader(unittest.TestCase):
- def setUp(self):
- with patch("openai.OpenAI"), patch("openai.AsyncOpenAI"):
- self.dataset_manager = DatasetManager()
- self.document_loader = DocumentLoaderTool(self.dataset_manager)
- self.dataset_search = DatasetSearchTool(self.dataset_manager)
-
- def test_is_ipfs_hash(self):
- self.assertTrue(self.document_loader._is_ipfs_hash("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"))
- self.assertTrue(self.document_loader._is_ipfs_hash("ipfs://QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"))
- self.assertFalse(self.document_loader._is_ipfs_hash("https://example.com/document.pdf"))
-
- @patch('talos.utils.http_client.SecureHTTPClient.get')
- def test_fetch_content_from_url_text(self, mock_get):
- mock_response = Mock()
- mock_response.headers = {'content-type': 'text/plain'}
- mock_response.text = "This is a test document."
- mock_response.raise_for_status.return_value = None
- mock_get.return_value = mock_response
-
- content = self.dataset_manager._fetch_content_from_url("https://example.com/test.txt")
- self.assertEqual(content, "This is a test document.")
-
- def test_clean_text(self):
- dirty_text = "This is a\n\n\n\ntest document."
- clean_text = self.dataset_manager._clean_text(dirty_text)
- self.assertEqual(clean_text, "This is a\n\ntest document.")
-
- def test_chunk_content(self):
- content = "This is sentence one. This is sentence two. This is sentence three."
- chunks = self.dataset_manager._process_and_chunk_content(content, chunk_size=30, chunk_overlap=10)
- self.assertTrue(len(chunks) > 1)
- self.assertTrue(all(len(chunk) <= 40 for chunk in chunks))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/tests/test_github_service.py b/tests/test_github_service.py
deleted file mode 100644
index caf662c9..00000000
--- a/tests/test_github_service.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import annotations
-
-import unittest
-from unittest.mock import MagicMock, patch
-
-from talos.services.implementations.github import GithubService
-
-
-class TestGithubService(unittest.TestCase):
- @patch("talos.services.implementations.github.GithubTools")
- @patch("talos.services.implementations.github.GithubPRReviewAgent")
- def test_review_pr(
- self,
- mock_github_pr_review_agent: MagicMock,
- mock_github_tools: MagicMock,
- ) -> None:
- # Arrange
- mock_github_tools_instance = mock_github_tools.return_value
- mock_github_tools_instance.get_pr_diff.return_value = "diff"
- mock_github_tools_instance.get_pr_comments.return_value = "comments"
- mock_github_tools_instance.get_pr_files.return_value = ["file1.py", "file2.py"]
-
- mock_agent_instance = mock_github_pr_review_agent.return_value
- mock_agent_instance.run.return_value = {"output": "feedback"}
-
- service = GithubService(token="test_token")
-
- # Act
- result = service.review_pr("test_user", "test_repo", 1)
-
- # Assert
- self.assertEqual(result, "feedback")
- mock_github_tools_instance.get_pr_diff.assert_called_once_with("test_user", "test_repo", 1)
- mock_github_tools_instance.get_pr_comments.assert_called_once_with("test_user", "test_repo", 1)
- mock_github_tools_instance.get_pr_files.assert_called_once_with("test_user", "test_repo", 1)
- mock_agent_instance.run.assert_called_once_with(
- input="Diff: diff\n\nComments: comments\n\nFiles: ['file1.py', 'file2.py']",
- user="test_user",
- project="test_repo",
- )
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/test_github_tools.py b/tests/test_github_tools.py
deleted file mode 100644
index 2e69026b..00000000
--- a/tests/test_github_tools.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from __future__ import annotations
-
-import unittest
-from unittest.mock import MagicMock, patch
-
-from talos.tools.github.tools import GithubTools
-
-
-class TestGithubTools(unittest.TestCase):
- @patch("talos.tools.github.tools.Github")
- def test_create_issue(self, mock_github: MagicMock) -> None:
- # Arrange
- mock_github_instance = mock_github.return_value
- mock_repo = MagicMock()
- mock_issue = MagicMock()
- mock_issue.number = 123
- mock_issue.title = "Test Issue"
- mock_issue.html_url = "http://example.com/issue/123"
- mock_repo.create_issue.return_value = mock_issue
- mock_github_instance.get_repo.return_value = mock_repo
-
- tools = GithubTools(token="test_token")
-
- # Act
- result = tools.create_issue(
- user="test_user",
- project="test_repo",
- title="Test Issue",
- body="This is a test issue.",
- )
-
- # Assert
- self.assertEqual(
- result,
- {
- "number": 123,
- "title": "Test Issue",
- "url": "http://example.com/issue/123",
- },
- )
- mock_github_instance.get_repo.assert_called_once_with("test_user/test_repo")
- mock_repo.create_issue.assert_called_once_with(title="Test Issue", body="This is a test issue.")
-
- @patch("talos.tools.github.tools.Github")
- def test_get_all_pull_requests(self, mock_github: MagicMock) -> None:
- # Arrange
- mock_github_instance = mock_github.return_value
- mock_repo = MagicMock()
- mock_pr = MagicMock()
- mock_pr.number = 1
- mock_pr.title = "Test PR"
- mock_pr.html_url = "http://example.com/pr/1"
- mock_repo.get_pulls.return_value = [mock_pr]
- mock_github_instance.get_repo.return_value = mock_repo
-
- tools = GithubTools(token="test_token")
-
- # Act
- result = tools.get_all_pull_requests(user="test_user", project="test_repo", state="all")
-
- # Assert
- self.assertEqual(
- result,
- [
- {
- "number": 1,
- "title": "Test PR",
- "url": "http://example.com/pr/1",
- }
- ],
- )
- mock_github_instance.get_repo.assert_called_once_with("test_user/test_repo")
- mock_repo.get_pulls.assert_called_once_with(state="all")
-
- @patch("talos.tools.github.tools.Github")
- def test_comment_on_pr(self, mock_github: MagicMock) -> None:
- # Arrange
- mock_github_instance = mock_github.return_value
- mock_repo = MagicMock()
- mock_pr = MagicMock()
- mock_repo.get_pull.return_value = mock_pr
- mock_github_instance.get_repo.return_value = mock_repo
-
- tools = GithubTools(token="test_token")
-
- # Act
- tools.comment_on_pr(user="test_user", project="test_repo", pr_number=1, comment="Test comment")
-
- # Assert
- mock_github_instance.get_repo.assert_called_once_with("test_user/test_repo")
- mock_repo.get_pull.assert_called_once_with(number=1)
- mock_pr.create_issue_comment.assert_called_once_with("Test comment")
-
- @patch("talos.tools.github.tools.Github")
- def test_approve_pr(self, mock_github: MagicMock) -> None:
- # Arrange
- mock_github_instance = mock_github.return_value
- mock_repo = MagicMock()
- mock_pr = MagicMock()
- mock_repo.get_pull.return_value = mock_pr
- mock_github_instance.get_repo.return_value = mock_repo
-
- tools = GithubTools(token="test_token")
-
- # Act
- tools.approve_pr(user="test_user", project="test_repo", pr_number=1)
-
- # Assert
- mock_github_instance.get_repo.assert_called_once_with("test_user/test_repo")
- mock_repo.get_pull.assert_called_once_with(number=1)
- mock_pr.create_review.assert_called_once_with(event="APPROVE")
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/test_main_agent.py b/tests/test_main_agent.py
deleted file mode 100644
index b8f176a1..00000000
--- a/tests/test_main_agent.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-from langchain_core.language_models import BaseChatModel
-
-from talos.core.main_agent import MainAgent
-from talos.hypervisor.hypervisor import Hypervisor
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-
-
-@pytest.fixture
-def mock_model() -> BaseChatModel:
- """
- Returns a mock of the BaseChatModel.
- """
- return MagicMock(spec=BaseChatModel)
-
-
-def test_main_agent_initialization(mock_model: BaseChatModel) -> None:
- """
- Tests that the MainAgent can be initialized without errors.
- """
- with (
- patch("talos.core.main_agent.FilePromptManager") as mock_file_prompt_manager,
- patch("talos.core.main_agent.Hypervisor") as mock_hypervisor,
- patch.dict(
- "os.environ",
- {
- "GITHUB_API_TOKEN": "test_token",
- "OPENAI_API_KEY": "test_key",
- "TWITTER_BEARER_TOKEN": "test_twitter_token",
- },
- ),
- patch("os.environ.get") as mock_os_get,
- patch("ssl.create_default_context", return_value=MagicMock()),
- patch("tweepy.Client"),
- patch("langchain_openai.ChatOpenAI"),
- ):
- mock_os_get.side_effect = lambda key, default=None: {
- "GITHUB_API_TOKEN": "test_token",
- "OPENAI_API_KEY": "test_key",
- "TWITTER_BEARER_TOKEN": "test_twitter_token",
- }.get(key, default)
- mock_prompt_manager = MagicMock(spec=FilePromptManager)
- mock_prompt_manager.get_prompt.return_value = Prompt(
- name="main_agent_prompt",
- template="You are a helpful assistant.",
- input_variables=[],
- )
- mock_file_prompt_manager.return_value = mock_prompt_manager
- mock_hypervisor.return_value = MagicMock(spec=Hypervisor)
-
- agent = MainAgent(
- model=mock_model,
- prompts_dir="",
- prompt_manager=mock_prompt_manager,
- schema=None,
- )
- assert agent is not None
- assert agent.model == mock_model
- assert agent.prompt_manager == mock_prompt_manager
- assert agent.skills is not None
- assert agent.services is not None
- assert agent.supervisor is not None
- assert agent.tool_manager is not None
- assert len(agent.tool_manager.tools) > 0
diff --git a/tests/test_memory_tool.py b/tests/test_memory_tool.py
deleted file mode 100644
index c99eabcc..00000000
--- a/tests/test_memory_tool.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import shutil
-import tempfile
-import unittest
-from pathlib import Path
-from unittest.mock import MagicMock, create_autospec
-
-from langchain_core.embeddings import Embeddings
-from langchain_core.language_models import BaseChatModel
-from langchain_core.runnables import Runnable
-
-from talos.core.memory import Memory
-from talos.tools.memory_tool import AddMemoryTool
-
-
-class TestAddMemoryTool(unittest.TestCase):
- def test_run(self):
- from talos.core.agent import Agent
-
- # Create a mock agent
- agent = MagicMock(spec=Agent)
- # Create the tool
- AddMemoryTool.model_rebuild()
- tool = AddMemoryTool(agent=agent)
- # Run the tool
- description = "This is a test memory."
- result = tool._run(description)
- # Check that the agent's add_memory method was called
- agent.add_memory.assert_called_once_with(description)
- self.assertEqual(result, f"Stored in memory: {description}")
-
-
-class TestAddMemoryToolIntegration(unittest.TestCase):
- def setUp(self):
- # Create a mock embeddings model
- class MockEmbeddings(Embeddings):
- def embed_documents(self, texts):
- return [[1.0] * 768 for _ in texts]
-
- def embed_query(self, text):
- return [1.0] * 768
-
- # Create a mock model
- mock_model = create_autospec(BaseChatModel)
- mock_model.with_structured_output.return_value = create_autospec(Runnable)
-
- # Create a temporary directory
- self.temp_dir = tempfile.mkdtemp()
-
- from talos.core.agent import Agent
- AddMemoryTool.model_rebuild(_types_namespace={'Agent': Agent})
-
- # Create a mock agent with file-based memory (not database)
- self.agent = Agent(
- model=mock_model,
- memory=Memory(
- file_path=Path(self.temp_dir) / "test_memory.json",
- embeddings_model=MockEmbeddings(),
- use_database=False, # Force file-based storage for consistent test behavior
- ),
- )
-
- def tearDown(self) -> None:
- # Remove the temporary directory
- shutil.rmtree(self.temp_dir)
-
- def test_run_integration(self):
- # Get the tool from the agent's tool manager
- tool = self.agent.tool_manager.get_tool("add_memory")
- self.assertIsInstance(tool, AddMemoryTool)
-
- # Run the tool
- description = "This is a test memory."
- result = tool._run(description)
- self.assertEqual(result, f"Stored in memory: {description}")
-
- # Check that the memory was added - just verify that memories exist
- memories = self.agent.memory.search(description)
- self.assertTrue(len(memories) > 0, "No memories found after adding memory - memory storage/retrieval is broken")
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/test_prompt_concatenation.py b/tests/test_prompt_concatenation.py
deleted file mode 100644
index d5b50bc0..00000000
--- a/tests/test_prompt_concatenation.py
+++ /dev/null
@@ -1,106 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-import pytest
-from langchain_core.language_models import BaseChatModel
-
-from talos.core.main_agent import MainAgent
-from talos.hypervisor.hypervisor import Hypervisor
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-
-
-@pytest.fixture
-def mock_model() -> BaseChatModel:
- """
- Returns a mock of the BaseChatModel.
- """
- return MagicMock(spec=BaseChatModel)
-
-
-def test_prompt_concatenation(mock_model: BaseChatModel) -> None:
- """
- Tests that the MainAgent can be initialized with a concatenated prompt.
- """
- with (
- patch("talos.core.main_agent.FilePromptManager") as mock_file_prompt_manager,
- patch("talos.core.main_agent.Hypervisor") as mock_hypervisor,
- patch.dict(
- "os.environ",
- {
- "GITHUB_API_TOKEN": "test_token",
- "OPENAI_API_KEY": "test_key",
- "TWITTER_BEARER_TOKEN": "test_twitter_token",
- },
- ),
- patch("os.environ.get") as mock_os_get,
- patch("ssl.create_default_context", return_value=MagicMock()),
- patch("tweepy.Client"),
- patch("langchain_openai.ChatOpenAI"),
- ):
- mock_os_get.side_effect = lambda key, default=None: {
- "GITHUB_API_TOKEN": "test_token",
- "OPENAI_API_KEY": "test_key",
- "TWITTER_BEARER_TOKEN": "test_twitter_token",
- }.get(key, default)
-
- # Create a mock FilePromptManager
- with patch("os.listdir", return_value=[]):
- mock_prompt_manager = FilePromptManager(prompts_dir="dummy_dir")
-
- # Add mock prompts
- mock_prompt_manager.prompts = {
- "main_agent_prompt": Prompt(
- name="main_agent_prompt",
- template="This is the main prompt.",
- input_variables=[],
- ),
- "general_agent_prompt": Prompt(
- name="general_agent_prompt",
- template="This is the general prompt.",
- input_variables=["time"],
- ),
- }
-
- mock_file_prompt_manager.return_value = mock_prompt_manager
- mock_hypervisor.return_value = MagicMock(spec=Hypervisor)
-
- MainAgent(
- model=mock_model,
- prompts_dir="",
- prompt_manager=mock_prompt_manager,
- schema=None,
- )
-
-
-def test_prompt_node_backward_compatibility(mock_model: BaseChatModel) -> None:
- """Test that PromptNode still works with legacy prompt_names."""
- from talos.dag.nodes import PromptNode, GraphState
- from talos.prompts.prompt_managers.file_prompt_manager import FilePromptManager
-
- PromptNode.model_rebuild()
-
- with patch("os.listdir", return_value=[]):
- mock_prompt_manager = FilePromptManager(prompts_dir="dummy_dir")
-
- mock_prompt_manager.prompts = {
- "test_prompt": Prompt(
- name="test_prompt",
- template="Test template",
- input_variables=[],
- )
- }
-
- node = PromptNode(
- node_id="test_node",
- name="Test Node",
- prompt_manager=mock_prompt_manager,
- prompt_names=["test_prompt"]
- )
-
- state: GraphState = {
- "messages": [], "context": {}, "current_query": "test",
- "results": {}, "metadata": {}
- }
-
- result = node.execute(state)
- assert "Applied prompt using prompt names" in result["results"]["test_node"]
diff --git a/tests/test_scheduled_jobs.py b/tests/test_scheduled_jobs.py
deleted file mode 100644
index 12f482f3..00000000
--- a/tests/test_scheduled_jobs.py
+++ /dev/null
@@ -1,266 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-from datetime import datetime, timedelta
-from typing import Optional
-from unittest.mock import patch
-
-import pytest
-from langchain_openai import ChatOpenAI
-
-from talos.core.main_agent import MainAgent
-from talos.core.scheduled_job import ScheduledJob
-from talos.core.job_scheduler import JobScheduler
-
-
-class MockScheduledJob(ScheduledJob):
- """Test implementation of ScheduledJob for testing purposes."""
-
- execution_count: int = 0
- last_execution: Optional[datetime] = None
-
- def __init__(self, name: str = "test_job", **kwargs):
- if 'cron_expression' not in kwargs and 'execute_at' not in kwargs:
- kwargs['cron_expression'] = "0 * * * *" # Every hour
-
- kwargs.setdefault('description', "Test scheduled job")
- kwargs.setdefault('execution_count', 0)
- kwargs.setdefault('last_execution', None)
-
- super().__init__(
- name=name,
- **kwargs
- )
-
- async def run(self, **kwargs) -> str:
- self.execution_count += 1
- self.last_execution = datetime.now()
- return f"Test job executed {self.execution_count} times"
-
-
-class MockOneTimeJob(ScheduledJob):
- """Test implementation of one-time ScheduledJob."""
-
- executed: bool = False
-
- def __init__(self, execute_at: datetime, **kwargs):
- super().__init__(
- name="one_time_test",
- description="One-time test job",
- execute_at=execute_at,
- executed=False,
- **kwargs
- )
-
- async def run(self, **kwargs) -> str:
- self.executed = True
- return "One-time job executed"
-
-
-class TestScheduledJobValidation:
- """Test ScheduledJob validation and configuration."""
-
- def test_cron_job_creation(self):
- """Test creating a job with cron expression."""
- job = MockScheduledJob(name="cron_test", cron_expression="0 9 * * *")
- assert job.name == "cron_test"
- assert job.cron_expression == "0 9 * * *"
- assert job.execute_at is None
- assert job.is_recurring()
- assert not job.is_one_time()
-
- def test_one_time_job_creation(self):
- """Test creating a one-time job with datetime."""
- future_time = datetime.now() + timedelta(hours=1)
- job = MockOneTimeJob(execute_at=future_time)
- assert job.name == "one_time_test"
- assert job.execute_at == future_time
- assert job.cron_expression is None
- assert job.is_one_time()
- assert not job.is_recurring()
-
- def test_job_validation_requires_schedule(self):
- """Test that job validation requires either cron or datetime."""
- with pytest.raises(ValueError, match="Either cron_expression or execute_at must be provided"):
- MockScheduledJob(
- name="invalid_job",
- description="Invalid job without schedule",
- cron_expression=None,
- execute_at=None,
- execution_count=0,
- last_execution=None
- )
-
- def test_job_validation_exclusive_schedule(self):
- """Test that job validation prevents both cron and datetime."""
- future_time = datetime.now() + timedelta(hours=1)
- with pytest.raises(ValueError, match="Only one of cron_expression or execute_at should be provided"):
- MockScheduledJob(
- name="invalid_job",
- description="Invalid job with both schedules",
- cron_expression="0 * * * *",
- execute_at=future_time,
- execution_count=0,
- last_execution=None
- )
-
- def test_should_execute_now(self):
- """Test should_execute_now method for one-time jobs."""
- past_time = datetime.now() - timedelta(minutes=1)
- future_time = datetime.now() + timedelta(minutes=1)
-
- past_job = MockOneTimeJob(execute_at=past_time)
- future_job = MockOneTimeJob(execute_at=future_time)
- cron_job = MockScheduledJob()
-
- assert past_job.should_execute_now()
- assert not future_job.should_execute_now()
- assert not cron_job.should_execute_now()
-
-
-class TestJobScheduler:
- """Test JobScheduler functionality."""
-
- @pytest.fixture
- def scheduler(self):
- """Create a JobScheduler instance for testing."""
- return JobScheduler()
-
- def test_scheduler_initialization(self, scheduler):
- """Test scheduler initialization."""
- assert scheduler.timezone == "UTC"
- assert not scheduler.is_running()
- assert len(scheduler.list_jobs()) == 0
-
- def test_register_job(self, scheduler):
- """Test job registration."""
- job = MockScheduledJob(name="test_register")
- scheduler.register_job(job)
-
- assert len(scheduler.list_jobs()) == 1
- assert scheduler.get_job("test_register") == job
-
- def test_unregister_job(self, scheduler):
- """Test job unregistration."""
- job = MockScheduledJob(name="test_unregister")
- scheduler.register_job(job)
-
- assert scheduler.unregister_job("test_unregister")
- assert len(scheduler.list_jobs()) == 0
- assert scheduler.get_job("test_unregister") is None
-
- def test_unregister_nonexistent_job(self, scheduler):
- """Test unregistering a job that doesn't exist."""
- assert not scheduler.unregister_job("nonexistent")
-
- def test_register_disabled_job(self, scheduler):
- """Test registering a disabled job."""
- job = MockScheduledJob(name="disabled_job", enabled=False)
- scheduler.register_job(job)
-
- assert len(scheduler.list_jobs()) == 1
- assert scheduler.get_job("disabled_job") == job
-
-
-class TestMainAgentIntegration:
- """Test MainAgent integration with scheduled jobs."""
-
- @pytest.fixture
- def main_agent(self):
- """Create a MainAgent instance for testing."""
- with patch.dict('os.environ', {
- 'GITHUB_API_TOKEN': 'test_token',
- 'TWITTER_BEARER_TOKEN': 'test_twitter_token',
- 'OPENAI_API_KEY': 'test_openai_key'
- }):
- agent = MainAgent(
- model=ChatOpenAI(model="gpt-5", api_key="test_key"),
- prompts_dir="src/talos/prompts"
- )
- if agent.job_scheduler:
- agent.job_scheduler.stop()
- return agent
-
- def test_main_agent_scheduler_initialization(self, main_agent):
- """Test that MainAgent initializes with a job scheduler."""
- assert main_agent.job_scheduler is not None
- assert isinstance(main_agent.job_scheduler, JobScheduler)
-
- def test_add_scheduled_job(self, main_agent):
- """Test adding a scheduled job to MainAgent."""
- job = MockScheduledJob(name="main_agent_test")
- main_agent.add_scheduled_job(job)
-
- assert len(main_agent.list_scheduled_jobs()) == 1
- assert main_agent.get_scheduled_job("main_agent_test") == job
-
- def test_remove_scheduled_job(self, main_agent):
- """Test removing a scheduled job from MainAgent."""
- job = MockScheduledJob(name="remove_test")
- main_agent.add_scheduled_job(job)
-
- assert main_agent.remove_scheduled_job("remove_test")
- assert len(main_agent.list_scheduled_jobs()) == 0
- assert main_agent.get_scheduled_job("remove_test") is None
-
- def test_pause_resume_job(self, main_agent):
- """Test pausing and resuming jobs."""
- job = MockScheduledJob(name="pause_test")
- main_agent.add_scheduled_job(job)
-
- main_agent.pause_scheduled_job("pause_test")
- main_agent.resume_scheduled_job("pause_test")
-
- def test_predefined_jobs_registration(self):
- """Test that predefined jobs are registered during initialization."""
- job = MockScheduledJob(name="predefined_job")
-
- with patch.dict('os.environ', {
- 'GITHUB_API_TOKEN': 'test_token',
- 'TWITTER_BEARER_TOKEN': 'test_twitter_token',
- 'OPENAI_API_KEY': 'test_openai_key'
- }):
- agent = MainAgent(
- model=ChatOpenAI(model="gpt-5", api_key="test_key"),
- prompts_dir="src/talos/prompts",
- scheduled_jobs=[job]
- )
- if agent.job_scheduler:
- agent.job_scheduler.stop()
-
- assert len(agent.list_scheduled_jobs()) == 1
- assert agent.get_scheduled_job("predefined_job") == job
-
-
-def test_job_execution():
- """Test that jobs can be executed."""
- async def run_test():
- job = MockScheduledJob(name="execution_test")
-
- result = await job.run()
-
- assert job.execution_count == 1
- assert job.last_execution is not None
- assert result == "Test job executed 1 times"
-
- result2 = await job.run()
- assert job.execution_count == 2
- assert result2 == "Test job executed 2 times"
-
- asyncio.run(run_test())
-
-
-def test_one_time_job_execution():
- """Test one-time job execution."""
- async def run_test():
- future_time = datetime.now() + timedelta(seconds=1)
- job = MockOneTimeJob(execute_at=future_time)
-
- assert not job.executed
-
- result = await job.run()
-
- assert job.executed
- assert result == "One-time job executed"
-
- asyncio.run(run_test())
diff --git a/tests/test_simple.py b/tests/test_simple.py
deleted file mode 100644
index 3b79f3d7..00000000
--- a/tests/test_simple.py
+++ /dev/null
@@ -1,2 +0,0 @@
-def test_always_passes():
- assert True
diff --git a/tests/test_supervised_tool.py b/tests/test_supervised_tool.py
deleted file mode 100644
index 65d95962..00000000
--- a/tests/test_supervised_tool.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import annotations
-
-from langchain_core.tools import tool
-
-from talos.tools.supervised_tool import SupervisedTool
-from tests.simple_supervisor import SimpleSupervisor
-
-
-@tool
-def dummy_tool(x: int) -> int:
- """A dummy tool."""
- return x * 2
-
-
-def test_supervised_tool_unsupervised() -> None:
- supervised_tool = SupervisedTool(
- tool=dummy_tool,
- supervisor=None,
- messages=[],
- name=dummy_tool.name,
- description=dummy_tool.description,
- args_schema=dummy_tool.args_schema,
- )
- assert supervised_tool.run({"x": 1}) == 2
-
-
-def test_supervised_tool_supervised() -> None:
- supervisor = SimpleSupervisor()
- supervised_tool = SupervisedTool(
- tool=dummy_tool,
- supervisor=supervisor,
- messages=[],
- name=dummy_tool.name,
- description=dummy_tool.description,
- args_schema=dummy_tool.args_schema,
- )
- assert supervised_tool.run({"x": 1}) == "Denied by SimpleSupervisor"
- assert supervised_tool.run({"x": 1}) == 2
- assert supervised_tool.run({"x": 1}) == "Denied by SimpleSupervisor"
- assert supervised_tool.run({"x": 1}) == 2
diff --git a/tests/test_supervisor.py b/tests/test_supervisor.py
deleted file mode 100644
index 27ec767d..00000000
--- a/tests/test_supervisor.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import annotations
-
-from langchain_core.tools import tool
-
-from talos.hypervisor.supervisor import Rule, RuleBasedSupervisor
-from talos.tools.supervised_tool import SupervisedTool
-
-
-def test_rule_based_supervisor():
- """
- Tests that the rule-based supervisor correctly approves or denies actions.
- """
-
- @tool
- def dummy_tool(x: int) -> int:
- """A dummy tool."""
- return x * 2
-
- rules = [
- Rule(
- tool_name="dummy_tool",
- validations={"x": lambda x: (x > 0, "x must be greater than 0") if x <= 0 else (True, None)},
- )
- ]
- supervisor = RuleBasedSupervisor(rules=rules)
- supervised_tool = SupervisedTool(
- tool=dummy_tool,
- supervisor=supervisor,
- messages=[],
- name=dummy_tool.name,
- description=dummy_tool.description,
- args_schema=dummy_tool.args_schema,
- )
-
- # Test that the supervisor approves a valid action.
- assert supervised_tool.run({"x": 1}) == 2
-
- # Test that the supervisor denies an invalid action.
- result = supervised_tool.run({"x": -1})
- assert result == "x must be greater than 0"
diff --git a/tests/test_talos_sentiment.py b/tests/test_talos_sentiment.py
deleted file mode 100644
index 6ac24b3d..00000000
--- a/tests/test_talos_sentiment.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import json
-from datetime import datetime, timezone
-from unittest.mock import MagicMock, patch
-
-from talos.services.implementations.talos_sentiment import TalosSentimentService
-from talos.skills.talos_sentiment_skill import TalosSentimentSkill
-
-
-def test_talos_sentiment_service_run():
- with (
- patch("talos.services.implementations.talos_sentiment.TweepyClient", autospec=True) as mock_tweepy_client_class,
- patch("talos.services.implementations.talos_sentiment.LLMClient", autospec=True) as mock_llm_client_class,
- patch(
- "talos.services.implementations.talos_sentiment.FilePromptManager",
- autospec=True,
- ) as mock_prompt_manager_class,
- ):
- # Mock the PromptManager
- mock_prompt = MagicMock()
- mock_prompt.template = "This is a test prompt."
- mock_prompt_manager = mock_prompt_manager_class.return_value
- mock_prompt_manager.get_prompt.return_value = mock_prompt
-
- # Mock the TweepyClient with v2 API format
- mock_tweet = MagicMock()
- mock_tweet.text = "This is a test tweet about Talos."
- mock_tweet.author_id = "user123"
- mock_tweet.public_metrics = {"like_count": 20, "retweet_count": 10, "reply_count": 5, "quote_count": 3}
- mock_tweet.created_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
-
- mock_user = {"id": "user123", "username": "test_user", "public_metrics": {"followers_count": 100}}
-
- mock_response = MagicMock()
- mock_response.data = [mock_tweet]
- mock_response.includes = {"users": [mock_user]}
-
- mock_twitter_client = mock_tweepy_client_class.return_value
- mock_twitter_client.search_tweets.return_value = mock_response
-
- # Mock the LLMClient
- mock_llm_client = mock_llm_client_class.return_value
- mock_llm_client.reasoning.return_value = json.dumps({"score": 75, "report": "This is a test report."})
-
- service = TalosSentimentService(
- prompt_manager=mock_prompt_manager,
- twitter_client=mock_twitter_client,
- llm_client=mock_llm_client,
- )
- response = service.analyze_sentiment(search_query="talos")
-
- # Assert that the llm was called with the correct arguments
- expected_tweet_data = [
- {
- "text": "This is a test tweet about Talos.",
- "author": "test_user",
- "followers": 100,
- "likes": 20,
- "retweets": 10,
- "replies": 5,
- "quotes": 3,
- "total_engagement": 38,
- "engagement_rate": 38.0,
- "age_in_days": 0,
- }
- ]
- expected_sentiment_prompt = mock_prompt.template.format(tweets=json.dumps(expected_tweet_data))
- mock_llm_client.reasoning.assert_called_once_with(expected_sentiment_prompt)
-
- # Assert that the response is correct
- assert response.score == 75
- assert response.answers[0] == "This is a test report."
-
-
-def test_talos_sentiment_skill_get_sentiment():
- with (
- patch("talos.services.implementations.talos_sentiment.TweepyClient", autospec=True),
- patch("talos.services.implementations.talos_sentiment.LLMClient", autospec=True),
- ):
- with patch("talos.skills.talos_sentiment_skill.TalosSentimentService", autospec=True) as mock_service_class:
- mock_service_instance = mock_service_class.return_value
- mock_service_instance.analyze_sentiment.return_value.score = 75
- mock_service_instance.analyze_sentiment.return_value.answers = ["This is a test report."]
-
- skill = TalosSentimentSkill(sentiment_service=mock_service_instance)
- result = skill.run(search_query="talos")
-
- # Assert that the result is correct
- assert result["score"] == 75
- assert result["report"] == "This is a test report."
diff --git a/tests/test_thread_sentiment.py b/tests/test_thread_sentiment.py
deleted file mode 100644
index 48361523..00000000
--- a/tests/test_thread_sentiment.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from unittest.mock import Mock, patch
-
-from thread_sentiment import main
-
-
-def test_import():
- assert main is not None
-
-
-@patch("openai.AsyncOpenAI")
-@patch("openai.OpenAI")
-@patch("thread_sentiment.main.prompt_manager")
-def test_analyze_sentiment(mock_prompt_manager, mock_openai, mock_async_openai):
- # Mock the Agent class
- main.Agent = Mock()
-
- # Mock the run method
- main.Agent.return_value.run.return_value = main.AIMessage(content="The sentiment is positive.")
-
- # Mock the prompt manager
- mock_prompt_manager.get_prompt.return_value.template = "test"
-
- # Call the function
- sentiment = main.analyze_sentiment([{"text": "This is a great tweet!", "followers": 100}])
-
- # Assert the result
- assert sentiment == "The sentiment is positive."
diff --git a/tests/test_tools.py b/tests/test_tools.py
deleted file mode 100644
index a131e355..00000000
--- a/tests/test_tools.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import unittest
-
-from langchain_core.tools import BaseTool
-
-from talos.tools.tool_manager import ToolManager
-
-
-class MockTool(BaseTool):
- name: str = "mock_tool"
- description: str = "A mock tool for testing."
-
- def _run(self, *args, **kwargs):
- pass
-
-
-class TestToolManager(unittest.TestCase):
- def setUp(self):
- self.tool_manager = ToolManager()
-
- def test_register_tool(self):
- tool = MockTool()
- self.tool_manager.register_tool(tool)
- self.assertIn("mock_tool", self.tool_manager.tools)
-
- def test_register_duplicate_tool(self):
- tool = MockTool()
- self.tool_manager.register_tool(tool)
- with self.assertRaises(ValueError):
- self.tool_manager.register_tool(tool)
-
- def test_unregister_tool(self):
- tool = MockTool()
- self.tool_manager.register_tool(tool)
- self.tool_manager.unregister_tool("mock_tool")
- self.assertNotIn("mock_tool", self.tool_manager.tools)
-
- def test_unregister_nonexistent_tool(self):
- with self.assertRaises(ValueError):
- self.tool_manager.unregister_tool("nonexistent_tool")
-
- def test_get_tool(self):
- tool = MockTool()
- self.tool_manager.register_tool(tool)
- retrieved_tool = self.tool_manager.get_tool("mock_tool")
- self.assertEqual(retrieved_tool, tool)
-
- def test_get_nonexistent_tool(self):
- with self.assertRaises(ValueError):
- self.tool_manager.get_tool("nonexistent_tool")
-
- def test_get_all_tools(self):
- tool1 = MockTool()
- tool2 = MockTool()
- tool2.name = "mock_tool2"
- self.tool_manager.register_tool(tool1)
- self.tool_manager.register_tool(tool2)
- tools = self.tool_manager.get_all_tools()
- self.assertEqual(len(tools), 2)
- self.assertIn(tool1, tools)
- self.assertIn(tool2, tools)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/test_twitter_persona.py b/tests/test_twitter_persona.py
deleted file mode 100644
index 19a88789..00000000
--- a/tests/test_twitter_persona.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import unittest
-from typing import Any
-from unittest.mock import MagicMock, patch
-
-from talos.prompts.prompt import Prompt
-from talos.prompts.prompt_manager import PromptManager
-from talos.skills.twitter_persona import TwitterPersonaSkill
-from talos.tools.twitter import TwitterTool, TwitterToolName
-from talos.tools.twitter_client import TwitterClient
-
-
-class MockTwitterClient(TwitterClient):
- def get_user(self, username: str) -> Any:
- pass
-
- def search_tweets(self, query: str) -> list[Any]:
- return []
-
- def get_user_timeline(self, username: str) -> list[Any]:
- return []
-
- def get_user_mentions(self, username: str) -> list[Any]:
- return []
-
- def get_tweet(self, tweet_id: str) -> Any:
- pass
-
- def get_sentiment(self, search_query: str = "talos") -> float:
- return 0.5
-
- def post_tweet(self, tweet: str) -> Any:
- pass
-
- def reply_to_tweet(self, tweet_id: str, tweet: str) -> Any:
- pass
-
-
-class MockPromptManager(PromptManager):
- def get_prompt(self, name: str) -> Prompt | None:
- return Prompt(name="test_prompt", template="This is a test prompt.", input_variables=[])
-
- def get_prompt_with_config(self, config, context):
- return Prompt(name="test_prompt", template="This is a test prompt.", input_variables=[])
-
-
-class TestTwitterPersona(unittest.TestCase):
- @patch("talos.skills.twitter_persona.ChatOpenAI")
- def test_generate_persona_prompt(self, MockChatOpenAI):
- # Create mocks
- mock_twitter_client = MockTwitterClient()
- mock_prompt_manager = MockPromptManager()
- mock_llm = MockChatOpenAI.return_value
-
- from talos.models.twitter import TwitterPersonaResponse
- mock_structured_llm = MagicMock()
- mock_structured_llm.invoke.return_value = TwitterPersonaResponse(
- report="This is a persona description.",
- topics=["technology", "startups"],
- style=["analytical", "technical"]
- )
- mock_llm.with_structured_output.return_value = mock_structured_llm
-
- # Mock the API responses
- mock_tweet = MagicMock()
- mock_tweet.text = "This is a test tweet."
- mock_tweet.get_replied_to_id.return_value = None
-
- mock_twitter_client.get_user_timeline = MagicMock(return_value=[mock_tweet])
- mock_twitter_client.get_user_mentions = MagicMock(return_value=[mock_tweet])
-
- # Create the TwitterPersonaSkill with the mock client
- persona_skill = TwitterPersonaSkill(
- twitter_client=mock_twitter_client,
- prompt_manager=mock_prompt_manager,
- llm=mock_llm,
- )
-
- # Run the skill
- response = persona_skill.run(username="testuser")
-
- # Check the output
- self.assertIsInstance(response, TwitterPersonaResponse)
- self.assertEqual(response.report, "This is a persona description.")
- self.assertEqual(response.topics, ["technology", "startups"])
- self.assertEqual(response.style, ["analytical", "technical"])
- mock_llm.with_structured_output.assert_called_once_with(TwitterPersonaResponse)
-
- def test_twitter_tool_generate_persona_prompt(self):
- # Create a mock Twitter client
- mock_twitter_client = MockTwitterClient()
-
- # Create the TwitterTool with the mock client
- twitter_tool = TwitterTool(twitter_client=mock_twitter_client)
-
- # Run the tool
- with patch(
- "talos.tools.twitter.TwitterPersonaSkill",
- ) as MockTwitterPersonaSkill:
- from talos.models.twitter import TwitterPersonaResponse
- mock_persona_skill = MockTwitterPersonaSkill.return_value
- mock_persona_skill.run.return_value = TwitterPersonaResponse(
- report="This is a rendered prompt.",
- topics=["crypto", "trading"],
- style=["confident", "data-driven"]
- )
- response = twitter_tool._run(tool_name=TwitterToolName.GENERATE_PERSONA_PROMPT, username="testuser")
-
- # Check the output - should return just the report
- self.assertEqual(response, "This is a rendered prompt.")
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/test_twitter_tool.py b/tests/test_twitter_tool.py
deleted file mode 100644
index e926422b..00000000
--- a/tests/test_twitter_tool.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import unittest
-from datetime import datetime
-from typing import Any
-from unittest.mock import MagicMock
-
-from talos.models.evaluation import EvaluationResult
-from talos.models.twitter import TwitterPublicMetrics, TwitterUser
-from talos.tools.twitter import TwitterTool
-from talos.tools.twitter_client import TwitterClient
-from talos.tools.twitter_evaluator import TwitterAccountEvaluator
-
-
-class MockTwitterClient(TwitterClient):
- def get_user(self, username: str):
- return TwitterUser(
- id=12345,
- username="testuser",
- name="Test User",
- created_at=datetime.now(),
- profile_image_url="http://example.com/image.jpg",
- public_metrics=TwitterPublicMetrics(
- followers_count=100,
- following_count=10,
- tweet_count=50,
- listed_count=5,
- like_count=200,
- media_count=10,
- ),
- description="This is a test user.",
- url="http://example.com",
- verified=True,
- )
-
- def search_tweets(self, query: str):
- return []
-
- def get_user_timeline(self, username: str) -> list[Any]:
- mock_tweet = MagicMock()
- mock_tweet.text = "This is a test tweet from the timeline."
- return [mock_tweet]
-
- def get_user_mentions(self, username: str) -> list[Any]:
- mock_tweet = MagicMock()
- mock_tweet.text = "This is a test tweet from the mentions."
- mock_tweet.in_reply_to_screen_name = "testuser"
- return [mock_tweet]
-
- def get_tweet(self, tweet_id: str) -> Any:
- mock_tweet = MagicMock()
- mock_tweet.text = "This is a test tweet."
- return mock_tweet
-
- def get_sentiment(self, search_query: str = "talos") -> float:
- return 0.5
-
- def post_tweet(self, tweet: str) -> Any:
- pass
-
- def reply_to_tweet(self, tweet_id: str, tweet: str) -> Any:
- pass
-
-
-class MockTwitterAccountEvaluator(TwitterAccountEvaluator):
- def evaluate(self, user: TwitterUser) -> EvaluationResult:
- return EvaluationResult(
- score=75,
- additional_data={
- "follower_following_ratio": 10,
- "account_age_days": 1000,
- "is_verified": True,
- "is_default_profile_image": False,
- },
- )
-
-
-class TestTwitterTool(unittest.TestCase):
- def test_evaluate_account(self):
- # Create mock dependencies
- mock_twitter_client = MockTwitterClient()
- mock_account_evaluator = MockTwitterAccountEvaluator()
-
- # Create an instance of the TwitterTool with the mock client and evaluator
- twitter_tool = TwitterTool(twitter_client=mock_twitter_client, account_evaluator=mock_account_evaluator)
-
- # Call the evaluate_account method
- result = twitter_tool.evaluate_account("test_user")
-
- # Assert the results
- self.assertEqual(result.score, 75)
- self.assertEqual(result.additional_data["follower_following_ratio"], 10)
- self.assertGreater(result.additional_data["account_age_days"], 365)
- self.assertTrue(result.additional_data["is_verified"])
- self.assertFalse(result.additional_data["is_default_profile_image"])
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/test_twitter_voice.py b/tests/test_twitter_voice.py
deleted file mode 100644
index 9c2ddb42..00000000
--- a/tests/test_twitter_voice.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from unittest.mock import patch, MagicMock
-
-from talos.models.twitter import TwitterPersonaResponse
-from talos.skills.twitter_voice import TwitterVoiceSkill
-
-
-class TestTwitterVoiceSkill:
- @patch('talos.skills.twitter_voice.TwitterPersonaSkill')
- @patch('talos.skills.twitter_voice.ChatOpenAI')
- def test_fallback_voice_characteristics(self, mock_chat_openai, mock_persona_skill_class):
- """Test that fallback voice characteristics are properly defined."""
- mock_llm = mock_chat_openai.return_value
- mock_persona_skill_class.return_value = MagicMock()
-
- skill = TwitterVoiceSkill(llm=mock_llm)
- fallback = skill._get_fallback_talos_voice()
-
- assert isinstance(fallback, TwitterPersonaResponse)
- assert len(fallback.topics) > 0
- assert len(fallback.style) > 0
- assert "autonomous" in fallback.report.lower()
-
- @patch('talos.skills.twitter_voice.TwitterPersonaSkill')
- @patch('talos.skills.twitter_voice.ChatOpenAI')
- def test_run_with_twitter_success(self, mock_chat_openai, mock_persona_skill_class):
- """Test successful Twitter analysis."""
- mock_llm = mock_chat_openai.return_value
- mock_response = TwitterPersonaResponse(
- report="Test analysis",
- topics=["AI", "crypto"],
- style=["technical", "concise"]
- )
- mock_persona_skill_instance = MagicMock()
- mock_persona_skill_instance.run.return_value = mock_response
- mock_persona_skill_class.return_value = mock_persona_skill_instance
-
- skill = TwitterVoiceSkill(llm=mock_llm)
- result = skill.run(username="test_user")
-
- assert result["voice_source"] == "twitter_analysis"
- assert result["username"] == "test_user"
- assert "voice_prompt" in result
-
- @patch('talos.skills.twitter_voice.TwitterPersonaSkill')
- @patch('talos.skills.twitter_voice.ChatOpenAI')
- def test_run_with_twitter_failure(self, mock_chat_openai, mock_persona_skill_class):
- """Test fallback when Twitter analysis fails."""
- mock_llm = mock_chat_openai.return_value
- mock_persona_skill_instance = MagicMock()
- mock_persona_skill_instance.run.side_effect = Exception("API Error")
- mock_persona_skill_class.return_value = mock_persona_skill_instance
-
- skill = TwitterVoiceSkill(llm=mock_llm)
- result = skill.run(username="talos_is")
-
- assert result["voice_source"] == "fallback_analysis"
- assert result["username"] == "talos_is"
- assert "voice_prompt" in result
-
- @patch('talos.skills.twitter_voice.TwitterPersonaSkill')
- @patch('talos.skills.twitter_voice.ChatOpenAI')
- def test_generate_voice_prompt(self, mock_chat_openai, mock_persona_skill_class):
- """Test voice prompt generation."""
- mock_llm = mock_chat_openai.return_value
- mock_persona_skill_class.return_value = MagicMock()
-
- skill = TwitterVoiceSkill(llm=mock_llm)
- persona = TwitterPersonaResponse(
- report="Test communication style",
- topics=["topic1", "topic2"],
- style=["style1", "style2"]
- )
-
- prompt = skill._generate_voice_prompt(persona)
- assert "style1, style2" in prompt
- assert "topic1, topic2" in prompt
- assert "Test communication style" in prompt
diff --git a/tests/test_yield_manager.py b/tests/test_yield_manager.py
deleted file mode 100644
index 9ebd2952..00000000
--- a/tests/test_yield_manager.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import json
-import unittest
-from unittest.mock import MagicMock, patch
-
-from talos.models.dexscreener import DexscreenerData
-from talos.models.gecko_terminal import OHLCV, GeckoTerminalOHLCVData
-from talos.services.implementations.yield_manager import YieldManagerService
-
-
-class TestYieldManagerService(unittest.TestCase):
- @patch("talos.services.implementations.yield_manager.TweepyClient")
- def test_update_staking_apr(self, mock_tweepy_client):
- dexscreener_client = MagicMock()
- gecko_terminal_client = MagicMock()
- llm_client = MagicMock()
-
- dexscreener_client.get_talos_data.return_value = DexscreenerData(
- priceUsd=1.0,
- priceChange=0.1,
- volume=1000000,
- )
- gecko_terminal_client.get_ohlcv_data.return_value = GeckoTerminalOHLCVData(
- ohlcv_list=[
- OHLCV(
- timestamp=1672531200,
- open=0.1,
- high=0.11,
- low=0.09,
- close=0.1,
- volume=1000000,
- )
- ]
- )
- llm_client.reasoning.return_value = json.dumps(
- {"apr": 0.15, "explanation": "The APR has been updated based on market conditions."}
- )
-
- yield_manager = YieldManagerService(dexscreener_client, gecko_terminal_client, llm_client)
- yield_manager.get_staked_supply_percentage = MagicMock(return_value=0.6)
-
- new_apr = yield_manager.update_staking_apr(75.0, "A report")
-
- self.assertIsInstance(new_apr, float)
- self.assertEqual(new_apr, 0.15)
-
- @patch("talos.services.implementations.yield_manager.TweepyClient")
- def test_min_max_yield_validation(self, mock_tweepy_client):
- dexscreener_client = MagicMock()
- gecko_terminal_client = MagicMock()
- llm_client = MagicMock()
-
- with self.assertRaises(ValueError):
- YieldManagerService(dexscreener_client, gecko_terminal_client, llm_client, min_yield=-0.01)
-
- with self.assertRaises(ValueError):
- YieldManagerService(dexscreener_client, gecko_terminal_client, llm_client, min_yield=0.2, max_yield=0.1)
-
- @patch("talos.services.implementations.yield_manager.TweepyClient")
- def test_apr_bounds_enforcement(self, mock_tweepy_client):
- dexscreener_client = MagicMock()
- gecko_terminal_client = MagicMock()
- llm_client = MagicMock()
-
- dexscreener_client.get_talos_data.return_value = DexscreenerData(
- priceUsd=1.0,
- priceChange=0.1,
- volume=1000000,
- )
- gecko_terminal_client.get_ohlcv_data.return_value = GeckoTerminalOHLCVData(ohlcv_list=[])
-
- llm_client.reasoning.return_value = json.dumps({"apr": 0.25, "explanation": "High APR recommendation"})
-
- yield_manager = YieldManagerService(
- dexscreener_client, gecko_terminal_client, llm_client, min_yield=0.05, max_yield=0.20
- )
- yield_manager.get_staked_supply_percentage = MagicMock(return_value=0.5)
-
- new_apr = yield_manager.update_staking_apr(75.0, "A report")
- self.assertEqual(new_apr, 0.20)
-
- llm_client.reasoning.return_value = json.dumps({"apr": 0.01, "explanation": "Low APR recommendation"})
-
- new_apr = yield_manager.update_staking_apr(75.0, "A report")
- self.assertEqual(new_apr, 0.05)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/uv.lock b/uv.lock
deleted file mode 100644
index 24c519d2..00000000
--- a/uv.lock
+++ /dev/null
@@ -1,2948 +0,0 @@
-version = 1
-revision = 2
-requires-python = ">=3.12"
-resolution-markers = [
- "python_full_version >= '3.13'",
- "python_full_version < '3.13'",
-]
-
-[[package]]
-name = "aiohappyeyeballs"
-version = "2.6.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" },
-]
-
-[[package]]
-name = "aiohttp"
-version = "3.12.15"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "aiohappyeyeballs" },
- { name = "aiosignal" },
- { name = "attrs" },
- { name = "frozenlist" },
- { name = "multidict" },
- { name = "propcache" },
- { name = "yarl" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/63/97/77cb2450d9b35f517d6cf506256bf4f5bda3f93a66b4ad64ba7fc917899c/aiohttp-3.12.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7", size = 702333, upload-time = "2025-07-29T05:50:46.507Z" },
- { url = "https://files.pythonhosted.org/packages/83/6d/0544e6b08b748682c30b9f65640d006e51f90763b41d7c546693bc22900d/aiohttp-3.12.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444", size = 476948, upload-time = "2025-07-29T05:50:48.067Z" },
- { url = "https://files.pythonhosted.org/packages/3a/1d/c8c40e611e5094330284b1aea8a4b02ca0858f8458614fa35754cab42b9c/aiohttp-3.12.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d", size = 469787, upload-time = "2025-07-29T05:50:49.669Z" },
- { url = "https://files.pythonhosted.org/packages/38/7d/b76438e70319796bfff717f325d97ce2e9310f752a267bfdf5192ac6082b/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c", size = 1716590, upload-time = "2025-07-29T05:50:51.368Z" },
- { url = "https://files.pythonhosted.org/packages/79/b1/60370d70cdf8b269ee1444b390cbd72ce514f0d1cd1a715821c784d272c9/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0", size = 1699241, upload-time = "2025-07-29T05:50:53.628Z" },
- { url = "https://files.pythonhosted.org/packages/a3/2b/4968a7b8792437ebc12186db31523f541943e99bda8f30335c482bea6879/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab", size = 1754335, upload-time = "2025-07-29T05:50:55.394Z" },
- { url = "https://files.pythonhosted.org/packages/fb/c1/49524ed553f9a0bec1a11fac09e790f49ff669bcd14164f9fab608831c4d/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb", size = 1800491, upload-time = "2025-07-29T05:50:57.202Z" },
- { url = "https://files.pythonhosted.org/packages/de/5e/3bf5acea47a96a28c121b167f5ef659cf71208b19e52a88cdfa5c37f1fcc/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545", size = 1719929, upload-time = "2025-07-29T05:50:59.192Z" },
- { url = "https://files.pythonhosted.org/packages/39/94/8ae30b806835bcd1cba799ba35347dee6961a11bd507db634516210e91d8/aiohttp-3.12.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c", size = 1635733, upload-time = "2025-07-29T05:51:01.394Z" },
- { url = "https://files.pythonhosted.org/packages/7a/46/06cdef71dd03acd9da7f51ab3a9107318aee12ad38d273f654e4f981583a/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd", size = 1696790, upload-time = "2025-07-29T05:51:03.657Z" },
- { url = "https://files.pythonhosted.org/packages/02/90/6b4cfaaf92ed98d0ec4d173e78b99b4b1a7551250be8937d9d67ecb356b4/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f", size = 1718245, upload-time = "2025-07-29T05:51:05.911Z" },
- { url = "https://files.pythonhosted.org/packages/2e/e6/2593751670fa06f080a846f37f112cbe6f873ba510d070136a6ed46117c6/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d", size = 1658899, upload-time = "2025-07-29T05:51:07.753Z" },
- { url = "https://files.pythonhosted.org/packages/8f/28/c15bacbdb8b8eb5bf39b10680d129ea7410b859e379b03190f02fa104ffd/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519", size = 1738459, upload-time = "2025-07-29T05:51:09.56Z" },
- { url = "https://files.pythonhosted.org/packages/00/de/c269cbc4faa01fb10f143b1670633a8ddd5b2e1ffd0548f7aa49cb5c70e2/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea", size = 1766434, upload-time = "2025-07-29T05:51:11.423Z" },
- { url = "https://files.pythonhosted.org/packages/52/b0/4ff3abd81aa7d929b27d2e1403722a65fc87b763e3a97b3a2a494bfc63bc/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3", size = 1726045, upload-time = "2025-07-29T05:51:13.689Z" },
- { url = "https://files.pythonhosted.org/packages/71/16/949225a6a2dd6efcbd855fbd90cf476052e648fb011aa538e3b15b89a57a/aiohttp-3.12.15-cp312-cp312-win32.whl", hash = "sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1", size = 423591, upload-time = "2025-07-29T05:51:15.452Z" },
- { url = "https://files.pythonhosted.org/packages/2b/d8/fa65d2a349fe938b76d309db1a56a75c4fb8cc7b17a398b698488a939903/aiohttp-3.12.15-cp312-cp312-win_amd64.whl", hash = "sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34", size = 450266, upload-time = "2025-07-29T05:51:17.239Z" },
- { url = "https://files.pythonhosted.org/packages/f2/33/918091abcf102e39d15aba2476ad9e7bd35ddb190dcdd43a854000d3da0d/aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315", size = 696741, upload-time = "2025-07-29T05:51:19.021Z" },
- { url = "https://files.pythonhosted.org/packages/b5/2a/7495a81e39a998e400f3ecdd44a62107254803d1681d9189be5c2e4530cd/aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd", size = 474407, upload-time = "2025-07-29T05:51:21.165Z" },
- { url = "https://files.pythonhosted.org/packages/49/fc/a9576ab4be2dcbd0f73ee8675d16c707cfc12d5ee80ccf4015ba543480c9/aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4", size = 466703, upload-time = "2025-07-29T05:51:22.948Z" },
- { url = "https://files.pythonhosted.org/packages/09/2f/d4bcc8448cf536b2b54eed48f19682031ad182faa3a3fee54ebe5b156387/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7", size = 1705532, upload-time = "2025-07-29T05:51:25.211Z" },
- { url = "https://files.pythonhosted.org/packages/f1/f3/59406396083f8b489261e3c011aa8aee9df360a96ac8fa5c2e7e1b8f0466/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d", size = 1686794, upload-time = "2025-07-29T05:51:27.145Z" },
- { url = "https://files.pythonhosted.org/packages/dc/71/164d194993a8d114ee5656c3b7ae9c12ceee7040d076bf7b32fb98a8c5c6/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b", size = 1738865, upload-time = "2025-07-29T05:51:29.366Z" },
- { url = "https://files.pythonhosted.org/packages/1c/00/d198461b699188a93ead39cb458554d9f0f69879b95078dce416d3209b54/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d", size = 1788238, upload-time = "2025-07-29T05:51:31.285Z" },
- { url = "https://files.pythonhosted.org/packages/85/b8/9e7175e1fa0ac8e56baa83bf3c214823ce250d0028955dfb23f43d5e61fd/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d", size = 1710566, upload-time = "2025-07-29T05:51:33.219Z" },
- { url = "https://files.pythonhosted.org/packages/59/e4/16a8eac9df39b48ae102ec030fa9f726d3570732e46ba0c592aeeb507b93/aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645", size = 1624270, upload-time = "2025-07-29T05:51:35.195Z" },
- { url = "https://files.pythonhosted.org/packages/1f/f8/cd84dee7b6ace0740908fd0af170f9fab50c2a41ccbc3806aabcb1050141/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461", size = 1677294, upload-time = "2025-07-29T05:51:37.215Z" },
- { url = "https://files.pythonhosted.org/packages/ce/42/d0f1f85e50d401eccd12bf85c46ba84f947a84839c8a1c2c5f6e8ab1eb50/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9", size = 1708958, upload-time = "2025-07-29T05:51:39.328Z" },
- { url = "https://files.pythonhosted.org/packages/d5/6b/f6fa6c5790fb602538483aa5a1b86fcbad66244997e5230d88f9412ef24c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d", size = 1651553, upload-time = "2025-07-29T05:51:41.356Z" },
- { url = "https://files.pythonhosted.org/packages/04/36/a6d36ad545fa12e61d11d1932eef273928b0495e6a576eb2af04297fdd3c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693", size = 1727688, upload-time = "2025-07-29T05:51:43.452Z" },
- { url = "https://files.pythonhosted.org/packages/aa/c8/f195e5e06608a97a4e52c5d41c7927301bf757a8e8bb5bbf8cef6c314961/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64", size = 1761157, upload-time = "2025-07-29T05:51:45.643Z" },
- { url = "https://files.pythonhosted.org/packages/05/6a/ea199e61b67f25ba688d3ce93f63b49b0a4e3b3d380f03971b4646412fc6/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51", size = 1710050, upload-time = "2025-07-29T05:51:48.203Z" },
- { url = "https://files.pythonhosted.org/packages/b4/2e/ffeb7f6256b33635c29dbed29a22a723ff2dd7401fff42ea60cf2060abfb/aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0", size = 422647, upload-time = "2025-07-29T05:51:50.718Z" },
- { url = "https://files.pythonhosted.org/packages/1b/8e/78ee35774201f38d5e1ba079c9958f7629b1fd079459aea9467441dbfbf5/aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84", size = 449067, upload-time = "2025-07-29T05:51:52.549Z" },
-]
-
-[[package]]
-name = "aiosignal"
-version = "1.4.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "frozenlist" },
- { name = "typing-extensions", marker = "python_full_version < '3.13'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" },
-]
-
-[[package]]
-name = "alembic"
-version = "1.14.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "mako" },
- { name = "sqlalchemy" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/00/1e/8cb8900ba1b6360431e46fb7a89922916d3a1b017a8908a7c0499cc7e5f6/alembic-1.14.0.tar.gz", hash = "sha256:b00892b53b3642d0b8dbedba234dbf1924b69be83a9a769d5a624b01094e304b", size = 1916172, upload-time = "2024-11-04T18:44:22.066Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/cb/06/8b505aea3d77021b18dcbd8133aa1418f1a1e37e432a465b14c46b2c0eaa/alembic-1.14.0-py3-none-any.whl", hash = "sha256:99bd884ca390466db5e27ffccff1d179ec5c05c965cfefc0607e69f9e411cb25", size = 233482, upload-time = "2024-11-04T18:44:24.335Z" },
-]
-
-[[package]]
-name = "annotated-types"
-version = "0.7.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
-]
-
-[[package]]
-name = "anthropic"
-version = "0.66.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anyio" },
- { name = "distro" },
- { name = "httpx" },
- { name = "jiter" },
- { name = "pydantic" },
- { name = "sniffio" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/fa/50/daa51c035e6a941f7b8034705796c7643443a85f5381cb41a797757fc6d3/anthropic-0.66.0.tar.gz", hash = "sha256:5aa8b18da57dc27d83fc1d82c9fb860977e5adfae3e0c215d7ab2ebd70afb9cb", size = 436933, upload-time = "2025-09-03T14:55:40.879Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/00/6a/d4ec7de9cc88b9a39c74dab1db259203b29b17fc564ecd1f92991678bd1e/anthropic-0.66.0-py3-none-any.whl", hash = "sha256:67b8cd4486f3cdd09211598dc5325cc8e4e349c106a03041231d551603551c06", size = 308035, upload-time = "2025-09-03T14:55:39.109Z" },
-]
-
-[[package]]
-name = "anyio"
-version = "4.10.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "idna" },
- { name = "sniffio" },
- { name = "typing-extensions", marker = "python_full_version < '3.13'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" },
-]
-
-[[package]]
-name = "apscheduler"
-version = "3.10.4"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pytz" },
- { name = "six" },
- { name = "tzlocal" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/5e/34/5dcb368cf89f93132d9a31bd3747962a9dc874480e54333b0c09fa7d56ac/APScheduler-3.10.4.tar.gz", hash = "sha256:e6df071b27d9be898e486bc7940a7be50b4af2e9da7c08f0744a96d4bd4cef4a", size = 100832, upload-time = "2023-08-19T16:44:58.293Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/13/b5/7af0cb920a476dccd612fbc9a21a3745fb29b1fcd74636078db8f7ba294c/APScheduler-3.10.4-py3-none-any.whl", hash = "sha256:fb91e8a768632a4756a585f79ec834e0e27aad5860bac7eaa523d9ccefd87661", size = 59303, upload-time = "2023-08-19T16:44:56.814Z" },
-]
-
-[[package]]
-name = "attrs"
-version = "25.3.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" },
-]
-
-[[package]]
-name = "base58"
-version = "2.1.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7f/45/8ae61209bb9015f516102fa559a2914178da1d5868428bd86a1b4421141d/base58-2.1.1.tar.gz", hash = "sha256:c5d0cb3f5b6e81e8e35da5754388ddcc6d0d14b6c6a132cb93d69ed580a7278c", size = 6528, upload-time = "2021-10-30T22:12:17.858Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4a/45/ec96b29162a402fc4c1c5512d114d7b3787b9d1c2ec241d9568b4816ee23/base58-2.1.1-py3-none-any.whl", hash = "sha256:11a36f4d3ce51dfc1043f3218591ac4eb1ceb172919cebe05b52a5bcc8d245c2", size = 5621, upload-time = "2021-10-30T22:12:16.658Z" },
-]
-
-[[package]]
-name = "beautifulsoup4"
-version = "4.13.4"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "soupsieve" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/d8/e4/0c4c39e18fd76d6a628d4dd8da40543d136ce2d1752bd6eeeab0791f4d6b/beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195", size = 621067, upload-time = "2025-04-15T17:05:13.836Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285, upload-time = "2025-04-15T17:05:12.221Z" },
-]
-
-[[package]]
-name = "bitarray"
-version = "3.7.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/99/b6/282f5f0331b3877d4e79a8aa1cf63b5113a10f035a39bef1fa1dfe9e9e09/bitarray-3.7.1.tar.gz", hash = "sha256:795b1760418ab750826420ae24f06f392c08e21dc234f0a369a69cc00444f8ec", size = 150474, upload-time = "2025-08-28T22:18:15.346Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/89/27/46b5b4dabecf84f750587cded3640658448d27c59f4dd2cbaa589085f43a/bitarray-3.7.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b99a0347bc6131046c19e056a113daa34d7df99f1f45510161bc78bc8461a470", size = 147349, upload-time = "2025-08-28T22:15:32.729Z" },
- { url = "https://files.pythonhosted.org/packages/f9/1e/7f61150577127a1540136ba8a63ba17c661a17e721e03404fcd5833a4a05/bitarray-3.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d7e274ac1975e55ebfb8166cce27e13dc99120c1d6ce9e490d7a716b9be9abb5", size = 143922, upload-time = "2025-08-28T22:15:33.963Z" },
- { url = "https://files.pythonhosted.org/packages/ca/b2/7c852472df8c644d05530bc0ad586fead5f23a9d176873c2c54f57e16b4e/bitarray-3.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b9a2eb7d2e0e9c2f25256d2663c0a2a4798fe3110e3ddbbb1a7b71740b4de08", size = 330277, upload-time = "2025-08-28T22:15:34.997Z" },
- { url = "https://files.pythonhosted.org/packages/7b/38/681340eea0997c48ef2dbf1acb0786090518704ca32f9a2c3c669bdea08e/bitarray-3.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e15e70a3cf5bb519e2448524d689c02ff6bcd4750587a517e2bffee06065bf27", size = 349562, upload-time = "2025-08-28T22:15:36.554Z" },
- { url = "https://files.pythonhosted.org/packages/c4/f4/6fc43f896af85c5b10a74b1d8a87c05915464869594131a2d7731707a108/bitarray-3.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c65257899bb8faf6a111297b4ff0066324a6b901318582c0453a01422c3bcd5a", size = 341249, upload-time = "2025-08-28T22:15:37.774Z" },
- { url = "https://files.pythonhosted.org/packages/89/c7/1f71164799cacd44964ead87e1fc7e2f0ddec6d0519515a82d54eb8c8a13/bitarray-3.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38b0261483c59bb39ae9300ad46bf0bbf431ab604266382d986a349c96171b36", size = 332874, upload-time = "2025-08-28T22:15:38.935Z" },
- { url = "https://files.pythonhosted.org/packages/95/cd/4d7c19064fa7fe94c2818712695fa186a1d0bb9c5cb0cf34693df81d3202/bitarray-3.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2b1ed363a4ef5622dccbf7822f01b51195062c4f382b28c9bd125d046d0324c", size = 321107, upload-time = "2025-08-28T22:15:40.071Z" },
- { url = "https://files.pythonhosted.org/packages/1e/d2/7d5ffe491c70614c0eb4a0186666efe925a02e25ed80ebd19c5fcb1c62e8/bitarray-3.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dfde50ae55e075dcd5801e2c3ea0e749c849ed2cbbee991af0f97f1bdbadb2a6", size = 324999, upload-time = "2025-08-28T22:15:41.241Z" },
- { url = "https://files.pythonhosted.org/packages/11/d9/95fb87ec72c01169dad574baf7bc9e0d2bb73975d7ea29a83920a38646f4/bitarray-3.7.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45660e2fabcdc1bab9699a468b312f47956300d41d6a2ea91c8f067572aaf38a", size = 321816, upload-time = "2025-08-28T22:15:42.417Z" },
- { url = "https://files.pythonhosted.org/packages/6b/3d/57ac96bbd125df75219c59afa297242054c09f22548aff028a8cefa8f120/bitarray-3.7.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7b4a41dc183d7d16750634f65566205990f94144755a39f33da44c0350c3e1a8", size = 349342, upload-time = "2025-08-28T22:15:43.997Z" },
- { url = "https://files.pythonhosted.org/packages/a9/14/d28f7456d2c3b3f7898186498b6d7fd3eecab267c300fb333fc2a8d55965/bitarray-3.7.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8e07374d60040b24d1a158895d9758424db13be63d4b2fe1870e37f9dec009", size = 350501, upload-time = "2025-08-28T22:15:45.377Z" },
- { url = "https://files.pythonhosted.org/packages/bb/a4/0f803dc446e602b21e61315f5fa2cdec02a65340147b08f7efadba559f38/bitarray-3.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f31d8c2168bf2a52e4539232392352832c2296e07e0e14b6e06a44da574099ba", size = 331362, upload-time = "2025-08-28T22:15:46.577Z" },
- { url = "https://files.pythonhosted.org/packages/c9/03/25e4c4b91a33f1eae0a9e9b2b11f1eaed14e37499abbde154ff33888f5f5/bitarray-3.7.1-cp312-cp312-win32.whl", hash = "sha256:fe1f1f4010244cb07f6a079854a12e1627e4fb9ea99d672f2ceccaf6653ca514", size = 141474, upload-time = "2025-08-28T22:15:48.185Z" },
- { url = "https://files.pythonhosted.org/packages/25/53/98efa8ee389e4cbd91fc7c87bfebd4e11d6f8a027eb3f9be42d1addf1f51/bitarray-3.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:f41a4b57cbc128a699e9d716a56c90c7fc76554e680fe2962f49cc4d8688b051", size = 148458, upload-time = "2025-08-28T22:15:49.256Z" },
- { url = "https://files.pythonhosted.org/packages/97/7f/16d59c041b0208bc1003fcfbf466f1936b797440e6119ce0adca7318af48/bitarray-3.7.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e62892645f6a214eefb58a42c3ed2501af2e40a797844e0e09ec1e400ce75f3d", size = 147343, upload-time = "2025-08-28T22:15:50.617Z" },
- { url = "https://files.pythonhosted.org/packages/1a/fb/5add457d3faa0e17fde5e220bb33c0084355b9567ff9bcba2fe70fef3626/bitarray-3.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3092f6bbf4a75b1e6f14a5b1030e27c435f341afeb23987115e45a25cc68ba91", size = 143904, upload-time = "2025-08-28T22:15:52.06Z" },
- { url = "https://files.pythonhosted.org/packages/95/b9/c5ab584bb8d0ba1ec72eaac7fc1e712294db77a6230c033c9b15a2de33ae/bitarray-3.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:851398428f5604c53371b72c5e0a28163274264ada4a08cd1eafe65fde1f68d0", size = 330206, upload-time = "2025-08-28T22:15:53.492Z" },
- { url = "https://files.pythonhosted.org/packages/f0/cd/a4d95232a2374ce55e740fbb052a1e3a9aa52e96c7597d9152b1c9d79ecc/bitarray-3.7.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa05460dc4f57358680b977b4a254d331b24c8beb501319b998625fd6a22654b", size = 349372, upload-time = "2025-08-28T22:15:55.043Z" },
- { url = "https://files.pythonhosted.org/packages/69/6c/8fb54cea100bd9358a7478d392042845800e809ab3a00873f2f0ae3d0306/bitarray-3.7.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ad0df7886cb9d6d2ff75e87d323108a0e32bdca5c9918071681864129ce8ea8", size = 341120, upload-time = "2025-08-28T22:15:56.372Z" },
- { url = "https://files.pythonhosted.org/packages/bd/eb/dcbb1782bf93afa2baccbc1206bb1053f61fe999443e9180e7d9be322565/bitarray-3.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55c31bc3d2c9e48741c812ee5ce4607c6f33e33f339831c214d923ffc7777d21", size = 332759, upload-time = "2025-08-28T22:15:57.984Z" },
- { url = "https://files.pythonhosted.org/packages/e2/f2/164aed832c5ece367d5347610cb7e50e5706ca1a882b9f172cb84669f591/bitarray-3.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44f468fb4857fff86c65bec5e2fb67067789e40dad69258e9bb78fc6a6df49e7", size = 320992, upload-time = "2025-08-28T22:16:01.039Z" },
- { url = "https://files.pythonhosted.org/packages/35/35/fd51da63ad364d5c03690bb895e34b20c9bedce10c6d0b4d7ed7677c4b09/bitarray-3.7.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:340c524c7c934b61d1985d805bffe7609180fb5d16ece6ce89b51aa535b936f2", size = 324987, upload-time = "2025-08-28T22:16:02.327Z" },
- { url = "https://files.pythonhosted.org/packages/a3/f3/3f4f31a80f343c6c3360ca4eac04f471bf009b6346de745016f8b4990bad/bitarray-3.7.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0751596f60f33df66245b2dafa3f7fbe13cb7ac91dd14ead87d8c2eec57cb3ed", size = 321816, upload-time = "2025-08-28T22:16:03.751Z" },
- { url = "https://files.pythonhosted.org/packages/f5/60/26ce8cff96255198581cb88f9566820d6b3c262db4c185995cc5537b3d07/bitarray-3.7.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e501bd27c795105aaba02b5212ecd1bb552ca2ee2ede53e5a8cb74deee0e2052", size = 349354, upload-time = "2025-08-28T22:16:04.966Z" },
- { url = "https://files.pythonhosted.org/packages/dc/f8/e2edda9c37ba9be5349beb145dcad14d8d339f7de293b4b2bd770227c5a7/bitarray-3.7.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe2493d3f49e314e573022ead4d8c845c9748979b7eb95e815429fe947c4bde2", size = 350491, upload-time = "2025-08-28T22:16:06.778Z" },
- { url = "https://files.pythonhosted.org/packages/c0/c5/b82dd6bd8699ad818c13ae02b6acfc6c38c9278af1f71005b5d0c5f29338/bitarray-3.7.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1f1575cc0f66aa70a0bb5cb57c8d9d1b7d541d920455169c6266919bf804dc20", size = 331367, upload-time = "2025-08-28T22:16:08.53Z" },
- { url = "https://files.pythonhosted.org/packages/51/82/03613ad262d6e2a76b906dd279de26694910a95e4ed8ebde57c9fd3f3aa7/bitarray-3.7.1-cp313-cp313-win32.whl", hash = "sha256:da3dfd2776226e15d3288a3a24c7975f9ee160ba198f2efa66bc28c5ba76d792", size = 141481, upload-time = "2025-08-28T22:16:09.727Z" },
- { url = "https://files.pythonhosted.org/packages/f1/7e/1730701a865fd1e4353900d5821c96e68695aed88d121f8783aea14c4e74/bitarray-3.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:33f604bffd06b170637f8a48ddcf42074ed1e1980366ac46058e065ce04bfe2a", size = 148450, upload-time = "2025-08-28T22:16:10.959Z" },
-]
-
-[[package]]
-name = "cachetools"
-version = "5.5.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/6c/81/3747dad6b14fa2cf53fcf10548cf5aea6913e96fab41a3c198676f8948a5/cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4", size = 28380, upload-time = "2025-02-20T21:01:19.524Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/72/76/20fa66124dbe6be5cafeb312ece67de6b61dd91a0247d1ea13db4ebb33c2/cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a", size = 10080, upload-time = "2025-02-20T21:01:16.647Z" },
-]
-
-[[package]]
-name = "certifi"
-version = "2025.8.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload-time = "2025-08-03T03:07:47.08Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload-time = "2025-08-03T03:07:45.777Z" },
-]
-
-[[package]]
-name = "cffi"
-version = "2.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pycparser", marker = "implementation_name != 'PyPy'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" },
- { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" },
- { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" },
- { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" },
- { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" },
- { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" },
- { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" },
- { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" },
- { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" },
- { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" },
- { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" },
- { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" },
- { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" },
- { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" },
- { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" },
- { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" },
- { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" },
- { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" },
- { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" },
- { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" },
- { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" },
- { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" },
- { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" },
- { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" },
- { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" },
- { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" },
- { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" },
- { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" },
- { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" },
- { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" },
- { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" },
- { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" },
- { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" },
- { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" },
- { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" },
- { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" },
- { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" },
- { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" },
- { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" },
- { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" },
- { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" },
- { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" },
- { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" },
- { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" },
- { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" },
- { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" },
-]
-
-[[package]]
-name = "charset-normalizer"
-version = "3.4.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload-time = "2025-08-09T07:57:28.46Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655, upload-time = "2025-08-09T07:56:08.475Z" },
- { url = "https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223, upload-time = "2025-08-09T07:56:09.708Z" },
- { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366, upload-time = "2025-08-09T07:56:11.326Z" },
- { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104, upload-time = "2025-08-09T07:56:13.014Z" },
- { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830, upload-time = "2025-08-09T07:56:14.428Z" },
- { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854, upload-time = "2025-08-09T07:56:16.051Z" },
- { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670, upload-time = "2025-08-09T07:56:17.314Z" },
- { url = "https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501, upload-time = "2025-08-09T07:56:18.641Z" },
- { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173, upload-time = "2025-08-09T07:56:20.289Z" },
- { url = "https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822, upload-time = "2025-08-09T07:56:21.551Z" },
- { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543, upload-time = "2025-08-09T07:56:23.115Z" },
- { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326, upload-time = "2025-08-09T07:56:24.721Z" },
- { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload-time = "2025-08-09T07:56:26.004Z" },
- { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload-time = "2025-08-09T07:56:27.25Z" },
- { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload-time = "2025-08-09T07:56:28.515Z" },
- { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload-time = "2025-08-09T07:56:29.716Z" },
- { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload-time = "2025-08-09T07:56:30.984Z" },
- { url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload-time = "2025-08-09T07:56:32.252Z" },
- { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload-time = "2025-08-09T07:56:33.481Z" },
- { url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload-time = "2025-08-09T07:56:34.739Z" },
- { url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload-time = "2025-08-09T07:56:35.981Z" },
- { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload-time = "2025-08-09T07:56:37.339Z" },
- { url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342, upload-time = "2025-08-09T07:56:38.687Z" },
- { url = "https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995, upload-time = "2025-08-09T07:56:40.048Z" },
- { url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640, upload-time = "2025-08-09T07:56:41.311Z" },
- { url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636, upload-time = "2025-08-09T07:56:43.195Z" },
- { url = "https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939, upload-time = "2025-08-09T07:56:44.819Z" },
- { url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580, upload-time = "2025-08-09T07:56:46.684Z" },
- { url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870, upload-time = "2025-08-09T07:56:47.941Z" },
- { url = "https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797, upload-time = "2025-08-09T07:56:49.756Z" },
- { url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224, upload-time = "2025-08-09T07:56:51.369Z" },
- { url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086, upload-time = "2025-08-09T07:56:52.722Z" },
- { url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400, upload-time = "2025-08-09T07:56:55.172Z" },
- { url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload-time = "2025-08-09T07:57:26.864Z" },
-]
-
-[[package]]
-name = "ckzg"
-version = "2.1.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7e/77/9a44934373eff2013cab641f4ac70b98bb8372fe2938ea78c349501aa825/ckzg-2.1.2.tar.gz", hash = "sha256:7d445215261068d914c3607fd89889bb405260911804cd0eea789ce7422db0d8", size = 1124054, upload-time = "2025-09-04T10:56:34.248Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/99/34/bc1261aeb3c173ce3eaf7f9050923823488d9e63ecfc4830e8b162168cb6/ckzg-2.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41181abbc3936c0f375c561cf01b9c210d6761b8d0d4bc8eadb52c38c3636e3e", size = 116304, upload-time = "2025-09-04T10:55:13.591Z" },
- { url = "https://files.pythonhosted.org/packages/69/17/cdec0fdd550560467792705af56880453e26c3dc9e9054144c0d7dc7ea5a/ckzg-2.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00d847cb39e6921dbead165a2f5a4434f3b4ed1455fac216acf8330941bce67a", size = 99951, upload-time = "2025-09-04T10:55:14.344Z" },
- { url = "https://files.pythonhosted.org/packages/3b/d7/b03cdd67ef4d5c07deb363737533c8e25e7c6b5348b9606873e75fb10820/ckzg-2.1.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f33abd137d90960e95a1620a35ea3a99d0b2d33272922d4c1325f3464833410", size = 176359, upload-time = "2025-09-04T10:55:15.139Z" },
- { url = "https://files.pythonhosted.org/packages/bb/00/67241dcedb40c8baa02e5cf831b77dbb908d54217a084a1f96749a93eba0/ckzg-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a8fcc29778f0e74ba9cf2a87b7ef1a354361602c0b323e2564a89b7f1a914ba", size = 161876, upload-time = "2025-09-04T10:55:15.945Z" },
- { url = "https://files.pythonhosted.org/packages/f7/64/bcfb3898ea04206ee4a175567665f7ea2bcc6b0cc6afaec1b4c08ed24509/ckzg-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba909a23522c1dbd85f5fb011a20603a7a4fd828b1bac3b144b78ab0a553c60f", size = 171124, upload-time = "2025-09-04T10:55:16.787Z" },
- { url = "https://files.pythonhosted.org/packages/17/61/d04b6715f28682678309fe532723f09eef0653bbb6bc3634bdfe08b9eeb2/ckzg-2.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f9ffe5a968acb976830cf24d266f3c25b8dee2730574cf6c4ddfa95dfe5ddbfc", size = 173489, upload-time = "2025-09-04T10:55:17.575Z" },
- { url = "https://files.pythonhosted.org/packages/f5/ff/7b24a023db001c5f9f99b81d8b45e7173c209485f7f7152d778f7e7b2b85/ckzg-2.1.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4d2bc2d809dfd7d7737f5021daeb67501eb63be05a458b50b8dd4453da5da16b", size = 188710, upload-time = "2025-09-04T10:55:18.878Z" },
- { url = "https://files.pythonhosted.org/packages/03/66/22a36b3e36c4f844c319b82a5b44a0a5bf9cbbd48c6b39644ee862241fcc/ckzg-2.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9b3110974e4982f0a6b0b44f2f29d2c915d443c012ef4898e91c3f4c38d8c5c2", size = 183460, upload-time = "2025-09-04T10:55:20.254Z" },
- { url = "https://files.pythonhosted.org/packages/02/d3/0ea9ddc370190e1345ecf5ee60071ec3084395ea83018002badaaf34d5d2/ckzg-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:f00f585128a2a2305b61988ce74b05d27eed5c2fcde4aea286790e7c7601ebae", size = 100694, upload-time = "2025-09-04T10:55:21.284Z" },
- { url = "https://files.pythonhosted.org/packages/11/ed/007ddc03613be6e8b246cace85edc943116fd78413a228789ca490775971/ckzg-2.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:457635f924610414b7e7460b1e5097187ca4c40406ea80c73848866267213fed", size = 116305, upload-time = "2025-09-04T10:55:22.186Z" },
- { url = "https://files.pythonhosted.org/packages/fc/9f/1a9df26c78b5f26c06a9a97948e12db434c2b4a784708b9214f72ad8cea7/ckzg-2.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:32b750784bef9fc7832dee07635eb46309eca5b55a6eb350ff40021b5fc483f2", size = 99956, upload-time = "2025-09-04T10:55:22.976Z" },
- { url = "https://files.pythonhosted.org/packages/7b/d8/9fc6537a8fcc0a373f0bb0cf2747e28e7aa99918c9d96385ef1f3ec51c9c/ckzg-2.1.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4eeff254f60b08dba7991d3ab20018d5df7cbe3318e67efd070d2361104e6d4", size = 176341, upload-time = "2025-09-04T10:55:23.792Z" },
- { url = "https://files.pythonhosted.org/packages/12/f1/06b20839ac10c4e839bad82e32ccf1078be810c972fdf703c08754fbd348/ckzg-2.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ad66afefac5836c340a853b543f932a9e98830359617414b1972233eaa5a069", size = 161827, upload-time = "2025-09-04T10:55:24.606Z" },
- { url = "https://files.pythonhosted.org/packages/c4/fa/04df1f37a4075c7e0032c960f037d14fead960db699504781fd421c735a4/ckzg-2.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d3046c1541f9b3aed997860fdab106795ac4e8335cb1d3fe6a2a45958fb00ab", size = 171088, upload-time = "2025-09-04T10:55:25.388Z" },
- { url = "https://files.pythonhosted.org/packages/6a/9d/50b82acbf1f89159fb70853ecd42a5b67ecba0e298eebb31760bb41b2aa0/ckzg-2.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1b98029c67d34bcf6b0e030d06505a1accc1829a378736e2cb69e4af852add99", size = 173505, upload-time = "2025-09-04T10:55:26.348Z" },
- { url = "https://files.pythonhosted.org/packages/61/6f/97085ef1002fcfd7620b774df13c918cd83a84247f1b5ece098073a3fc25/ckzg-2.1.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59541361c9402ec14790db88c16532e66ece8e56d985b75756f36387858549fa", size = 188738, upload-time = "2025-09-04T10:55:27.456Z" },
- { url = "https://files.pythonhosted.org/packages/2e/7a/e8208411860bd2dca57eae2771e045b1a4dcde8dc08004d74401ad74f23a/ckzg-2.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:edf41132119d67673af1cf6cbf22f3852d092e94c9c890ff153e761d7be6e684", size = 183486, upload-time = "2025-09-04T10:55:28.298Z" },
- { url = "https://files.pythonhosted.org/packages/41/28/8b381db79aa362e975e86c3bf2c85de6b9482923dc55f19bb21419d12994/ckzg-2.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:0074cbfe11702c1d413ed86a74d9fcfef48fcb206c31a37c0b3eeb830f6d0a05", size = 100693, upload-time = "2025-09-04T10:55:29.172Z" },
-]
-
-[[package]]
-name = "click"
-version = "8.2.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" },
-]
-
-[[package]]
-name = "colorama"
-version = "0.4.6"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
-]
-
-[[package]]
-name = "cryptography"
-version = "45.0.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "cffi", marker = "platform_python_implementation != 'PyPy'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a7/35/c495bffc2056f2dadb32434f1feedd79abde2a7f8363e1974afa9c33c7e2/cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971", size = 744980, upload-time = "2025-09-01T11:15:03.146Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0c/91/925c0ac74362172ae4516000fe877912e33b5983df735ff290c653de4913/cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee", size = 7041105, upload-time = "2025-09-01T11:13:59.684Z" },
- { url = "https://files.pythonhosted.org/packages/fc/63/43641c5acce3a6105cf8bd5baeceeb1846bb63067d26dae3e5db59f1513a/cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6", size = 4205799, upload-time = "2025-09-01T11:14:02.517Z" },
- { url = "https://files.pythonhosted.org/packages/bc/29/c238dd9107f10bfde09a4d1c52fd38828b1aa353ced11f358b5dd2507d24/cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339", size = 4430504, upload-time = "2025-09-01T11:14:04.522Z" },
- { url = "https://files.pythonhosted.org/packages/62/62/24203e7cbcc9bd7c94739428cd30680b18ae6b18377ae66075c8e4771b1b/cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8", size = 4209542, upload-time = "2025-09-01T11:14:06.309Z" },
- { url = "https://files.pythonhosted.org/packages/cd/e3/e7de4771a08620eef2389b86cd87a2c50326827dea5528feb70595439ce4/cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf", size = 3889244, upload-time = "2025-09-01T11:14:08.152Z" },
- { url = "https://files.pythonhosted.org/packages/96/b8/bca71059e79a0bb2f8e4ec61d9c205fbe97876318566cde3b5092529faa9/cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513", size = 4461975, upload-time = "2025-09-01T11:14:09.755Z" },
- { url = "https://files.pythonhosted.org/packages/58/67/3f5b26937fe1218c40e95ef4ff8d23c8dc05aa950d54200cc7ea5fb58d28/cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3", size = 4209082, upload-time = "2025-09-01T11:14:11.229Z" },
- { url = "https://files.pythonhosted.org/packages/0e/e4/b3e68a4ac363406a56cf7b741eeb80d05284d8c60ee1a55cdc7587e2a553/cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3", size = 4460397, upload-time = "2025-09-01T11:14:12.924Z" },
- { url = "https://files.pythonhosted.org/packages/22/49/2c93f3cd4e3efc8cb22b02678c1fad691cff9dd71bb889e030d100acbfe0/cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6", size = 4337244, upload-time = "2025-09-01T11:14:14.431Z" },
- { url = "https://files.pythonhosted.org/packages/04/19/030f400de0bccccc09aa262706d90f2ec23d56bc4eb4f4e8268d0ddf3fb8/cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd", size = 4568862, upload-time = "2025-09-01T11:14:16.185Z" },
- { url = "https://files.pythonhosted.org/packages/29/56/3034a3a353efa65116fa20eb3c990a8c9f0d3db4085429040a7eef9ada5f/cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8", size = 2936578, upload-time = "2025-09-01T11:14:17.638Z" },
- { url = "https://files.pythonhosted.org/packages/b3/61/0ab90f421c6194705a99d0fa9f6ee2045d916e4455fdbb095a9c2c9a520f/cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443", size = 3405400, upload-time = "2025-09-01T11:14:18.958Z" },
- { url = "https://files.pythonhosted.org/packages/63/e8/c436233ddf19c5f15b25ace33979a9dd2e7aa1a59209a0ee8554179f1cc0/cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2", size = 7021824, upload-time = "2025-09-01T11:14:20.954Z" },
- { url = "https://files.pythonhosted.org/packages/bc/4c/8f57f2500d0ccd2675c5d0cc462095adf3faa8c52294ba085c036befb901/cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691", size = 4202233, upload-time = "2025-09-01T11:14:22.454Z" },
- { url = "https://files.pythonhosted.org/packages/eb/ac/59b7790b4ccaed739fc44775ce4645c9b8ce54cbec53edf16c74fd80cb2b/cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59", size = 4423075, upload-time = "2025-09-01T11:14:24.287Z" },
- { url = "https://files.pythonhosted.org/packages/b8/56/d4f07ea21434bf891faa088a6ac15d6d98093a66e75e30ad08e88aa2b9ba/cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4", size = 4204517, upload-time = "2025-09-01T11:14:25.679Z" },
- { url = "https://files.pythonhosted.org/packages/e8/ac/924a723299848b4c741c1059752c7cfe09473b6fd77d2920398fc26bfb53/cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3", size = 3882893, upload-time = "2025-09-01T11:14:27.1Z" },
- { url = "https://files.pythonhosted.org/packages/83/dc/4dab2ff0a871cc2d81d3ae6d780991c0192b259c35e4d83fe1de18b20c70/cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1", size = 4450132, upload-time = "2025-09-01T11:14:28.58Z" },
- { url = "https://files.pythonhosted.org/packages/12/dd/b2882b65db8fc944585d7fb00d67cf84a9cef4e77d9ba8f69082e911d0de/cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27", size = 4204086, upload-time = "2025-09-01T11:14:30.572Z" },
- { url = "https://files.pythonhosted.org/packages/5d/fa/1d5745d878048699b8eb87c984d4ccc5da4f5008dfd3ad7a94040caca23a/cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17", size = 4449383, upload-time = "2025-09-01T11:14:32.046Z" },
- { url = "https://files.pythonhosted.org/packages/36/8b/fc61f87931bc030598e1876c45b936867bb72777eac693e905ab89832670/cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b", size = 4332186, upload-time = "2025-09-01T11:14:33.95Z" },
- { url = "https://files.pythonhosted.org/packages/0b/11/09700ddad7443ccb11d674efdbe9a832b4455dc1f16566d9bd3834922ce5/cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c", size = 4561639, upload-time = "2025-09-01T11:14:35.343Z" },
- { url = "https://files.pythonhosted.org/packages/71/ed/8f4c1337e9d3b94d8e50ae0b08ad0304a5709d483bfcadfcc77a23dbcb52/cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5", size = 2926552, upload-time = "2025-09-01T11:14:36.929Z" },
- { url = "https://files.pythonhosted.org/packages/bc/ff/026513ecad58dacd45d1d24ebe52b852165a26e287177de1d545325c0c25/cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90", size = 3392742, upload-time = "2025-09-01T11:14:38.368Z" },
-]
-
-[[package]]
-name = "cytoolz"
-version = "1.0.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "toolz" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a7/f9/3243eed3a6545c2a33a21f74f655e3fcb5d2192613cd3db81a93369eb339/cytoolz-1.0.1.tar.gz", hash = "sha256:89cc3161b89e1bb3ed7636f74ed2e55984fd35516904fc878cae216e42b2c7d6", size = 626652, upload-time = "2024-12-13T05:47:36.672Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d8/e8/218098344ed2cb5f8441fade9b2428e435e7073962374a9c71e59ac141a7/cytoolz-1.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fcb8f7d0d65db1269022e7e0428471edee8c937bc288ebdcb72f13eaa67c2fe4", size = 414121, upload-time = "2024-12-13T05:45:26.588Z" },
- { url = "https://files.pythonhosted.org/packages/de/27/4d729a5653718109262b758fec1a959aa9facb74c15460d9074dc76d6635/cytoolz-1.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:207d4e4b445e087e65556196ff472ff134370d9a275d591724142e255f384662", size = 390904, upload-time = "2024-12-13T05:45:27.718Z" },
- { url = "https://files.pythonhosted.org/packages/72/c0/cbabfa788bab9c6038953bf9478adaec06e88903a726946ea7c88092f5c4/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21cdf6bac6fd843f3b20280a66fd8df20dea4c58eb7214a2cd8957ec176f0bb3", size = 2090734, upload-time = "2024-12-13T05:45:30.515Z" },
- { url = "https://files.pythonhosted.org/packages/c3/66/369262c60f9423c2da82a60864a259c852f1aa122aced4acd2c679af58c0/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a55ec098036c0dea9f3bdc021f8acd9d105a945227d0811589f0573f21c9ce1", size = 2155933, upload-time = "2024-12-13T05:45:32.721Z" },
- { url = "https://files.pythonhosted.org/packages/aa/4e/ee55186802f8d24b5fbf9a11405ccd1203b30eded07cc17750618219b94e/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a13ab79ff4ce202e03ab646a2134696988b554b6dc4b71451e948403db1331d8", size = 2171903, upload-time = "2024-12-13T05:45:34.205Z" },
- { url = "https://files.pythonhosted.org/packages/a1/96/bd1a9f3396e9b7f618db8cd08d15630769ce3c8b7d0534f92cd639c977ae/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e2d944799026e1ff08a83241f1027a2d9276c41f7a74224cd98b7df6e03957d", size = 2125270, upload-time = "2024-12-13T05:45:36.982Z" },
- { url = "https://files.pythonhosted.org/packages/28/48/2a3762873091c88a69e161111cfbc6c222ff145d57ff011a642b169f04f1/cytoolz-1.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88ba85834cd523b91fdf10325e1e6d71c798de36ea9bdc187ca7bd146420de6f", size = 1973967, upload-time = "2024-12-13T05:45:39.505Z" },
- { url = "https://files.pythonhosted.org/packages/e4/50/500bd69774bdc49a4d78ec8779eb6ac7c1a9d706bfd91cf2a1dba604373a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a750b1af7e8bf6727f588940b690d69e25dc47cce5ce467925a76561317eaf7", size = 2021695, upload-time = "2024-12-13T05:45:40.911Z" },
- { url = "https://files.pythonhosted.org/packages/e4/4e/ba5a0ce34869495eb50653de8d676847490cf13a2cac1760fc4d313e78de/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44a71870f7eae31d263d08b87da7c2bf1176f78892ed8bdade2c2850478cb126", size = 2010177, upload-time = "2024-12-13T05:45:42.48Z" },
- { url = "https://files.pythonhosted.org/packages/87/57/615c630b3089a13adb15351d958d227430cf624f03b1dd39eb52c34c1f59/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c8231b9abbd8e368e036f4cc2e16902c9482d4cf9e02a6147ed0e9a3cd4a9ab0", size = 2154321, upload-time = "2024-12-13T05:45:43.979Z" },
- { url = "https://files.pythonhosted.org/packages/7f/0f/fe1aa2d931e3b35ecc05215bd75da945ea7346095b3b6f6027164e602d5a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:aa87599ccc755de5a096a4d6c34984de6cd9dc928a0c5eaa7607457317aeaf9b", size = 2188374, upload-time = "2024-12-13T05:45:46.783Z" },
- { url = "https://files.pythonhosted.org/packages/de/fa/fd363d97a641b6d0e2fd1d5c35b8fd41d9ccaeb4df56302f53bf23a58e3a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:67cd16537df51baabde3baa770ab7b8d16839c4d21219d5b96ac59fb012ebd2d", size = 2077911, upload-time = "2024-12-13T05:45:48.219Z" },
- { url = "https://files.pythonhosted.org/packages/d9/68/0a22946b98ae5201b54ccb4e651295285c0fb79406022b6ee8b2f791940c/cytoolz-1.0.1-cp312-cp312-win32.whl", hash = "sha256:fb988c333f05ee30ad4693fe4da55d95ec0bb05775d2b60191236493ea2e01f9", size = 321903, upload-time = "2024-12-13T05:45:50.3Z" },
- { url = "https://files.pythonhosted.org/packages/62/1a/f3903197956055032f8cb297342e2dff07e50f83991aebfe5b4c4fcb55e4/cytoolz-1.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:8f89c48d8e5aec55ffd566a8ec858706d70ed0c6a50228eca30986bfa5b4da8b", size = 364490, upload-time = "2024-12-13T05:45:51.494Z" },
- { url = "https://files.pythonhosted.org/packages/aa/2e/a9f069db0107749e9e72baf6c21abe3f006841a3bcfdc9b8420e22ef31eb/cytoolz-1.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6944bb93b287032a4c5ca6879b69bcd07df46f3079cf8393958cf0b0454f50c0", size = 407365, upload-time = "2024-12-13T05:45:52.803Z" },
- { url = "https://files.pythonhosted.org/packages/a9/9b/5e87dd0e31f54c778b4f9f34cc14c1162d3096c8d746b0f8be97d70dd73c/cytoolz-1.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e027260fd2fc5cb041277158ac294fc13dca640714527219f702fb459a59823a", size = 385233, upload-time = "2024-12-13T05:45:53.994Z" },
- { url = "https://files.pythonhosted.org/packages/63/00/2fd32b16284cdb97cfe092822179bc0c3bcdd5e927dd39f986169a517642/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88662c0e07250d26f5af9bc95911e6137e124a5c1ec2ce4a5d74de96718ab242", size = 2062903, upload-time = "2024-12-13T05:45:55.202Z" },
- { url = "https://files.pythonhosted.org/packages/85/39/b3cbb5a9847ba59584a263772ad4f8ca2dbfd2a0e11efd09211d1219804c/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:309dffa78b0961b4c0cf55674b828fbbc793cf2d816277a5c8293c0c16155296", size = 2139517, upload-time = "2024-12-13T05:45:56.804Z" },
- { url = "https://files.pythonhosted.org/packages/ea/39/bfcab4a46d50c467e36fe704f19d8904efead417787806ee210327f68390/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:edb34246e6eb40343c5860fc51b24937698e4fa1ee415917a73ad772a9a1746b", size = 2154849, upload-time = "2024-12-13T05:45:58.814Z" },
- { url = "https://files.pythonhosted.org/packages/fd/42/3bc6ee61b0aa47e1cb40819adc1a456d7efa809f0dea9faddacb43fdde8f/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a54da7a8e4348a18d45d4d5bc84af6c716d7f131113a4f1cc45569d37edff1b", size = 2102302, upload-time = "2024-12-13T05:46:00.181Z" },
- { url = "https://files.pythonhosted.org/packages/00/66/3f636c6ddea7b18026b90a8c238af472e423b86e427b11df02213689b012/cytoolz-1.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:241c679c3b1913c0f7259cf1d9639bed5084c86d0051641d537a0980548aa266", size = 1960872, upload-time = "2024-12-13T05:46:01.612Z" },
- { url = "https://files.pythonhosted.org/packages/40/36/cb3b7cdd651007b69f9c48e9d104cec7cb8dc53afa1d6a720e5ad08022fa/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5bfc860251a8f280ac79696fc3343cfc3a7c30b94199e0240b6c9e5b6b01a2a5", size = 2014430, upload-time = "2024-12-13T05:46:03.022Z" },
- { url = "https://files.pythonhosted.org/packages/88/3f/2e9bd2a16cfd269808922147551dcb2d8b68ba54a2c4deca2fa6a6cd0d5f/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c8edd1547014050c1bdad3ff85d25c82bd1c2a3c96830c6181521eb78b9a42b3", size = 2003127, upload-time = "2024-12-13T05:46:04.401Z" },
- { url = "https://files.pythonhosted.org/packages/c4/7d/08604ff940aa784df8343c387fdf2489b948b714a6afb587775ae94da912/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b349bf6162e8de215403d7f35f8a9b4b1853dc2a48e6e1a609a5b1a16868b296", size = 2142369, upload-time = "2024-12-13T05:46:06.004Z" },
- { url = "https://files.pythonhosted.org/packages/d2/c6/39919a0645bdbdf720e97cae107f959ea9d1267fbc3b0d94fc6e1d12ac8f/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1b18b35256219b6c3dd0fa037741b85d0bea39c552eab0775816e85a52834140", size = 2180427, upload-time = "2024-12-13T05:46:07.526Z" },
- { url = "https://files.pythonhosted.org/packages/d8/03/dbb9d47556ee54337e7e0ac209d17ceff2d2a197c34de08005abc7a7449b/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:738b2350f340ff8af883eb301054eb724997f795d20d90daec7911c389d61581", size = 2069785, upload-time = "2024-12-13T05:46:10.122Z" },
- { url = "https://files.pythonhosted.org/packages/ea/f8/11bb7b8947002231faae3ec2342df5896afbc19eb783a332cce6d219ff79/cytoolz-1.0.1-cp313-cp313-win32.whl", hash = "sha256:9cbd9c103df54fcca42be55ef40e7baea624ac30ee0b8bf1149f21146d1078d9", size = 320685, upload-time = "2024-12-13T05:46:11.553Z" },
- { url = "https://files.pythonhosted.org/packages/40/eb/dde173cf2357084ca9423950be1f2f11ab11d65d8bd30165bfb8fd4213e9/cytoolz-1.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:90e577e08d3a4308186d9e1ec06876d4756b1e8164b92971c69739ea17e15297", size = 362898, upload-time = "2024-12-13T05:46:12.771Z" },
-]
-
-[[package]]
-name = "dataclasses-json"
-version = "0.6.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "marshmallow" },
- { name = "typing-inspect" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/64/a4/f71d9cf3a5ac257c993b5ca3f93df5f7fb395c725e7f1e6479d2514173c3/dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0", size = 32227, upload-time = "2024-06-09T16:20:19.103Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a", size = 28686, upload-time = "2024-06-09T16:20:16.715Z" },
-]
-
-[[package]]
-name = "deprecated"
-version = "1.2.18"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "wrapt" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/98/97/06afe62762c9a8a86af0cfb7bfdab22a43ad17138b07af5b1a58442690a2/deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d", size = 2928744, upload-time = "2025-01-27T10:46:25.7Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/6e/c6/ac0b6c1e2d138f1002bcf799d330bd6d85084fece321e662a14223794041/Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec", size = 9998, upload-time = "2025-01-27T10:46:09.186Z" },
-]
-
-[[package]]
-name = "diskcache"
-version = "5.6.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/3f/21/1c1ffc1a039ddcc459db43cc108658f32c57d271d7289a2794e401d0fdb6/diskcache-5.6.3.tar.gz", hash = "sha256:2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc", size = 67916, upload-time = "2023-08-31T06:12:00.316Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3f/27/4570e78fc0bf5ea0ca45eb1de3818a23787af9b390c0b0a0033a1b8236f9/diskcache-5.6.3-py3-none-any.whl", hash = "sha256:5e31b2d5fbad117cc363ebaf6b689474db18a1f6438bc82358b024abd4c2ca19", size = 45550, upload-time = "2023-08-31T06:11:58.822Z" },
-]
-
-[[package]]
-name = "distro"
-version = "1.9.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" },
-]
-
-[[package]]
-name = "docstring-parser"
-version = "0.17.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" },
-]
-
-[[package]]
-name = "duckduckgo-search"
-version = "8.1.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "lxml" },
- { name = "primp" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/10/ef/07791a05751e6cc9de1dd49fb12730259ee109b18e6d097e25e6c32d5617/duckduckgo_search-8.1.1.tar.gz", hash = "sha256:9da91c9eb26a17e016ea1da26235d40404b46b0565ea86d75a9f78cc9441f935", size = 22868, upload-time = "2025-07-06T15:30:59.73Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/db/72/c027b3b488b1010cf71670032fcf7e681d44b81829d484bb04e31a949a8d/duckduckgo_search-8.1.1-py3-none-any.whl", hash = "sha256:f48adbb06626ee05918f7e0cef3a45639e9939805c4fc179e68c48a12f1b5062", size = 18932, upload-time = "2025-07-06T15:30:58.339Z" },
-]
-
-[[package]]
-name = "dydantic"
-version = "0.0.8"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pydantic" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/08/c5/2d097e5a4816b15186c1ae06c5cfe3c332e69a0f3556dc6cee2d370acf2a/dydantic-0.0.8.tar.gz", hash = "sha256:14a31d4cdfce314ce3e69e8f8c7c46cbc26ce3ce4485de0832260386c612942f", size = 8115, upload-time = "2025-01-29T20:36:13.771Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7a/7c/a1b120141a300853d82291faf0ba1a95133fa390e4b7d773647b69c8c0f4/dydantic-0.0.8-py3-none-any.whl", hash = "sha256:cd0a991f523bd8632699872f1c0c4278415dd04783e36adec5428defa0afb721", size = 8637, upload-time = "2025-01-29T20:36:12.217Z" },
-]
-
-[[package]]
-name = "eth-abi"
-version = "5.2.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "eth-typing" },
- { name = "eth-utils" },
- { name = "parsimonious" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/00/71/d9e1380bd77fd22f98b534699af564f189b56d539cc2b9dab908d4e4c242/eth_abi-5.2.0.tar.gz", hash = "sha256:178703fa98c07d8eecd5ae569e7e8d159e493ebb6eeb534a8fe973fbc4e40ef0", size = 49797, upload-time = "2025-01-14T16:29:34.629Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7a/b4/2f3982c4cbcbf5eeb6aec62df1533c0e63c653b3021ff338d44944405676/eth_abi-5.2.0-py3-none-any.whl", hash = "sha256:17abe47560ad753f18054f5b3089fcb588f3e3a092136a416b6c1502cb7e8877", size = 28511, upload-time = "2025-01-14T16:29:31.862Z" },
-]
-
-[[package]]
-name = "eth-account"
-version = "0.13.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "bitarray" },
- { name = "ckzg" },
- { name = "eth-abi" },
- { name = "eth-keyfile" },
- { name = "eth-keys" },
- { name = "eth-rlp" },
- { name = "eth-utils" },
- { name = "hexbytes" },
- { name = "pydantic" },
- { name = "rlp" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/74/cf/20f76a29be97339c969fd765f1237154286a565a1d61be98e76bb7af946a/eth_account-0.13.7.tar.gz", hash = "sha256:5853ecbcbb22e65411176f121f5f24b8afeeaf13492359d254b16d8b18c77a46", size = 935998, upload-time = "2025-04-21T21:11:21.204Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/46/18/088fb250018cbe665bc2111974301b2d59f294a565aff7564c4df6878da2/eth_account-0.13.7-py3-none-any.whl", hash = "sha256:39727de8c94d004ff61d10da7587509c04d2dc7eac71e04830135300bdfc6d24", size = 587452, upload-time = "2025-04-21T21:11:18.346Z" },
-]
-
-[[package]]
-name = "eth-hash"
-version = "0.7.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ee/38/577b7bc9380ef9dff0f1dffefe0c9a1ded2385e7a06c306fd95afb6f9451/eth_hash-0.7.1.tar.gz", hash = "sha256:d2411a403a0b0a62e8247b4117932d900ffb4c8c64b15f92620547ca5ce46be5", size = 12227, upload-time = "2025-01-13T21:29:21.765Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/eb/db/f8775490669d28aca24871c67dd56b3e72105cb3bcae9a4ec65dd70859b3/eth_hash-0.7.1-py3-none-any.whl", hash = "sha256:0fb1add2adf99ef28883fd6228eb447ef519ea72933535ad1a0b28c6f65f868a", size = 8028, upload-time = "2025-01-13T21:29:19.365Z" },
-]
-
-[[package]]
-name = "eth-keyfile"
-version = "0.8.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "eth-keys" },
- { name = "eth-utils" },
- { name = "pycryptodome" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/35/66/dd823b1537befefbbff602e2ada88f1477c5b40ec3731e3d9bc676c5f716/eth_keyfile-0.8.1.tar.gz", hash = "sha256:9708bc31f386b52cca0969238ff35b1ac72bd7a7186f2a84b86110d3c973bec1", size = 12267, upload-time = "2024-04-23T20:28:53.862Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/88/fc/48a586175f847dd9e05e5b8994d2fe8336098781ec2e9836a2ad94280281/eth_keyfile-0.8.1-py3-none-any.whl", hash = "sha256:65387378b82fe7e86d7cb9f8d98e6d639142661b2f6f490629da09fddbef6d64", size = 7510, upload-time = "2024-04-23T20:28:51.063Z" },
-]
-
-[[package]]
-name = "eth-keys"
-version = "0.7.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "eth-typing" },
- { name = "eth-utils" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/58/11/1ed831c50bd74f57829aa06e58bd82a809c37e070ee501c953b9ac1f1552/eth_keys-0.7.0.tar.gz", hash = "sha256:79d24fd876201df67741de3e3fefb3f4dbcbb6ace66e47e6fe662851a4547814", size = 30166, upload-time = "2025-04-07T17:40:21.697Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4d/25/0ae00f2b0095e559d61ad3dc32171bd5a29dfd95ab04b4edd641f7c75f72/eth_keys-0.7.0-py3-none-any.whl", hash = "sha256:b0cdda8ffe8e5ba69c7c5ca33f153828edcace844f67aabd4542d7de38b159cf", size = 20656, upload-time = "2025-04-07T17:40:20.441Z" },
-]
-
-[[package]]
-name = "eth-rlp"
-version = "2.2.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "eth-utils" },
- { name = "hexbytes" },
- { name = "rlp" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/7f/ea/ad39d001fa9fed07fad66edb00af701e29b48be0ed44a3bcf58cb3adf130/eth_rlp-2.2.0.tar.gz", hash = "sha256:5e4b2eb1b8213e303d6a232dfe35ab8c29e2d3051b86e8d359def80cd21db83d", size = 7720, upload-time = "2025-02-04T21:51:08.134Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/99/3b/57efe2bc2df0980680d57c01a36516cd3171d2319ceb30e675de19fc2cc5/eth_rlp-2.2.0-py3-none-any.whl", hash = "sha256:5692d595a741fbaef1203db6a2fedffbd2506d31455a6ad378c8449ee5985c47", size = 4446, upload-time = "2025-02-04T21:51:05.823Z" },
-]
-
-[[package]]
-name = "eth-rpc-py"
-version = "0.1.34"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "eth-abi" },
- { name = "eth-account" },
- { name = "eth-hash" },
- { name = "eth-typing" },
- { name = "httpx" },
- { name = "pydantic" },
- { name = "websockets" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/02/1d/441e11ff36c985ed436d4e8269276406febac669c0f9a14e69d566cff810/eth_rpc_py-0.1.34.tar.gz", hash = "sha256:73edbd2295543e4000aabe1f1ab639728aac3736174c3ae5bed5279507116a33", size = 125280, upload-time = "2025-09-10T09:52:48.873Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/78/10/348606d05cb14159b340603542942199a8fe37fac97039b52609349a3df5/eth_rpc_py-0.1.34-py3-none-any.whl", hash = "sha256:89da0289e891b86eb64a481b5cd292aa3d03adcbb833cedacef43b1bf9d825cc", size = 81766, upload-time = "2025-09-10T09:52:45.01Z" },
-]
-
-[[package]]
-name = "eth-typeshed-py"
-version = "0.1.34"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "eth-rpc-py" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/76/65/9ea75127e6f8ddb8bb988ad08e6e756483b26d91c3ff1e95affd456e2979/eth_typeshed_py-0.1.34.tar.gz", hash = "sha256:26a88e459c277344b9d6ff5bcfabd54c46446448ead15c2d98c8e6e5c9f2a4e2", size = 27800, upload-time = "2025-09-10T09:52:49.534Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/bc/57/e255ed3b94e368126ae5073b708e1460cece0f8c8339654b9f75ae39969d/eth_typeshed_py-0.1.34-py3-none-any.whl", hash = "sha256:8315a5c694dfe380e66f4d641920ce1075e7a58db26eebd7c868d66f15186ece", size = 49497, upload-time = "2025-09-10T09:52:45.979Z" },
-]
-
-[[package]]
-name = "eth-typing"
-version = "5.2.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/60/54/62aa24b9cc708f06316167ee71c362779c8ed21fc8234a5cd94a8f53b623/eth_typing-5.2.1.tar.gz", hash = "sha256:7557300dbf02a93c70fa44af352b5c4a58f94e997a0fd6797fb7d1c29d9538ee", size = 21806, upload-time = "2025-04-14T20:39:28.217Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/30/72/c370bbe4c53da7bf998d3523f5a0f38867654923a82192df88d0705013d3/eth_typing-5.2.1-py3-none-any.whl", hash = "sha256:b0c2812ff978267563b80e9d701f487dd926f1d376d674f3b535cfe28b665d3d", size = 19163, upload-time = "2025-04-14T20:39:26.571Z" },
-]
-
-[[package]]
-name = "eth-utils"
-version = "5.3.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "cytoolz", marker = "implementation_name == 'cpython'" },
- { name = "eth-hash" },
- { name = "eth-typing" },
- { name = "pydantic" },
- { name = "toolz", marker = "implementation_name == 'pypy'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/e6/e1/ee3a8728227c3558853e63ff35bd4c449abdf5022a19601369400deacd39/eth_utils-5.3.1.tar.gz", hash = "sha256:c94e2d2abd024a9a42023b4ddc1c645814ff3d6a737b33d5cfd890ebf159c2d1", size = 123506, upload-time = "2025-08-27T16:37:17.378Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/bf/4d/257cdc01ada430b8e84b9f2385c2553f33218f5b47da9adf0a616308d4b7/eth_utils-5.3.1-py3-none-any.whl", hash = "sha256:1f5476d8f29588d25b8ae4987e1ffdfae6d4c09026e476c4aad13b32dda3ead0", size = 102529, upload-time = "2025-08-27T16:37:15.449Z" },
-]
-
-[[package]]
-name = "fastapi"
-version = "0.115.6"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pydantic" },
- { name = "starlette" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/93/72/d83b98cd106541e8f5e5bfab8ef2974ab45a62e8a6c5b5e6940f26d2ed4b/fastapi-0.115.6.tar.gz", hash = "sha256:9ec46f7addc14ea472958a96aae5b5de65f39721a46aaf5705c480d9a8b76654", size = 301336, upload-time = "2024-12-03T22:46:01.629Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/52/b3/7e4df40e585df024fac2f80d1a2d579c854ac37109675db2b0cc22c0bb9e/fastapi-0.115.6-py3-none-any.whl", hash = "sha256:e9240b29e36fa8f4bb7290316988e90c381e5092e0cbe84e7818cc3713bcf305", size = 94843, upload-time = "2024-12-03T22:45:59.368Z" },
-]
-
-[[package]]
-name = "frozenlist"
-version = "1.7.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" },
- { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" },
- { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = "2025-06-09T23:00:44.793Z" },
- { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" },
- { url = "https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" },
- { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" },
- { url = "https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" },
- { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" },
- { url = "https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" },
- { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" },
- { url = "https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" },
- { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = "2025-06-09T23:01:00.015Z" },
- { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" },
- { url = "https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" },
- { url = "https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" },
- { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = "2025-06-09T23:01:06.54Z" },
- { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" },
- { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" },
- { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" },
- { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" },
- { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" },
- { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" },
- { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" },
- { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" },
- { url = "https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" },
- { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" },
- { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" },
- { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" },
- { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" },
- { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" },
- { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" },
- { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" },
- { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" },
- { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" },
- { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" },
- { url = "https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" },
- { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" },
- { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" },
- { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" },
- { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" },
- { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" },
- { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" },
- { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" },
- { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" },
- { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" },
- { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" },
- { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" },
- { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" },
- { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" },
- { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" },
- { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" },
- { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" },
-]
-
-[[package]]
-name = "google-api-core"
-version = "2.25.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "google-auth" },
- { name = "googleapis-common-protos" },
- { name = "proto-plus" },
- { name = "protobuf" },
- { name = "requests" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/dc/21/e9d043e88222317afdbdb567165fdbc3b0aad90064c7e0c9eb0ad9955ad8/google_api_core-2.25.1.tar.gz", hash = "sha256:d2aaa0b13c78c61cb3f4282c464c046e45fbd75755683c9c525e6e8f7ed0a5e8", size = 165443, upload-time = "2025-06-12T20:52:20.439Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/14/4b/ead00905132820b623732b175d66354e9d3e69fcf2a5dcdab780664e7896/google_api_core-2.25.1-py3-none-any.whl", hash = "sha256:8a2a56c1fef82987a524371f99f3bd0143702fecc670c72e600c1cda6bf8dbb7", size = 160807, upload-time = "2025-06-12T20:52:19.334Z" },
-]
-
-[[package]]
-name = "google-api-python-client"
-version = "2.181.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "google-api-core" },
- { name = "google-auth" },
- { name = "google-auth-httplib2" },
- { name = "httplib2" },
- { name = "uritemplate" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c2/96/5561a5d7e37781c880ca90975a70d61940ec1648b2b12e991311a9e39f83/google_api_python_client-2.181.0.tar.gz", hash = "sha256:d7060962a274a16a2c6f8fb4b1569324dbff11bfbca8eb050b88ead1dd32261c", size = 13545438, upload-time = "2025-09-02T15:41:33.852Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/be/03/72b7acf374a2cde9255df161686f00d8370117ac33e2bdd8fdadfe30272a/google_api_python_client-2.181.0-py3-none-any.whl", hash = "sha256:348730e3ece46434a01415f3d516d7a0885c8e624ce799f50f2d4d86c2475fb7", size = 14111793, upload-time = "2025-09-02T15:41:31.322Z" },
-]
-
-[[package]]
-name = "google-auth"
-version = "2.40.3"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "cachetools" },
- { name = "pyasn1-modules" },
- { name = "rsa" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9e/9b/e92ef23b84fa10a64ce4831390b7a4c2e53c0132568d99d4ae61d04c8855/google_auth-2.40.3.tar.gz", hash = "sha256:500c3a29adedeb36ea9cf24b8d10858e152f2412e3ca37829b3fa18e33d63b77", size = 281029, upload-time = "2025-06-04T18:04:57.577Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/17/63/b19553b658a1692443c62bd07e5868adaa0ad746a0751ba62c59568cd45b/google_auth-2.40.3-py2.py3-none-any.whl", hash = "sha256:1370d4593e86213563547f97a92752fc658456fe4514c809544f330fed45a7ca", size = 216137, upload-time = "2025-06-04T18:04:55.573Z" },
-]
-
-[[package]]
-name = "google-auth-httplib2"
-version = "0.2.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "google-auth" },
- { name = "httplib2" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/56/be/217a598a818567b28e859ff087f347475c807a5649296fb5a817c58dacef/google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05", size = 10842, upload-time = "2023-12-12T17:40:30.722Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/be/8a/fe34d2f3f9470a27b01c9e76226965863f153d5fbe276f83608562e49c04/google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d", size = 9253, upload-time = "2023-12-12T17:40:13.055Z" },
-]
-
-[[package]]
-name = "googleapis-common-protos"
-version = "1.70.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "protobuf" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/39/24/33db22342cf4a2ea27c9955e6713140fedd51e8b141b5ce5260897020f1a/googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257", size = 145903, upload-time = "2025-04-14T10:17:02.924Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/86/f1/62a193f0227cf15a920390abe675f386dec35f7ae3ffe6da582d3ade42c7/googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8", size = 294530, upload-time = "2025-04-14T10:17:01.271Z" },
-]
-
-[[package]]
-name = "greenlet"
-version = "3.2.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/03/b8/704d753a5a45507a7aab61f18db9509302ed3d0a27ac7e0359ec2905b1a6/greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d", size = 188260, upload-time = "2025-08-07T13:24:33.51Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/44/69/9b804adb5fd0671f367781560eb5eb586c4d495277c93bde4307b9e28068/greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd", size = 274079, upload-time = "2025-08-07T13:15:45.033Z" },
- { url = "https://files.pythonhosted.org/packages/46/e9/d2a80c99f19a153eff70bc451ab78615583b8dac0754cfb942223d2c1a0d/greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb", size = 640997, upload-time = "2025-08-07T13:42:56.234Z" },
- { url = "https://files.pythonhosted.org/packages/3b/16/035dcfcc48715ccd345f3a93183267167cdd162ad123cd93067d86f27ce4/greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968", size = 655185, upload-time = "2025-08-07T13:45:27.624Z" },
- { url = "https://files.pythonhosted.org/packages/31/da/0386695eef69ffae1ad726881571dfe28b41970173947e7c558d9998de0f/greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9", size = 649926, upload-time = "2025-08-07T13:53:15.251Z" },
- { url = "https://files.pythonhosted.org/packages/68/88/69bf19fd4dc19981928ceacbc5fd4bb6bc2215d53199e367832e98d1d8fe/greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6", size = 651839, upload-time = "2025-08-07T13:18:30.281Z" },
- { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" },
- { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" },
- { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" },
- { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" },
- { url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" },
- { url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size = 641073, upload-time = "2025-08-07T13:42:57.23Z" },
- { url = "https://files.pythonhosted.org/packages/f7/0b/bc13f787394920b23073ca3b6c4a7a21396301ed75a655bcb47196b50e6e/greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc", size = 655191, upload-time = "2025-08-07T13:45:29.752Z" },
- { url = "https://files.pythonhosted.org/packages/f2/d6/6adde57d1345a8d0f14d31e4ab9c23cfe8e2cd39c3baf7674b4b0338d266/greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a", size = 649516, upload-time = "2025-08-07T13:53:16.314Z" },
- { url = "https://files.pythonhosted.org/packages/7f/3b/3a3328a788d4a473889a2d403199932be55b1b0060f4ddd96ee7cdfcad10/greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504", size = 652169, upload-time = "2025-08-07T13:18:32.861Z" },
- { url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" },
- { url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" },
- { url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = "2025-08-07T13:18:24.072Z" },
- { url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" },
- { url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" },
- { url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" },
- { url = "https://files.pythonhosted.org/packages/c0/aa/687d6b12ffb505a4447567d1f3abea23bd20e73a5bed63871178e0831b7a/greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5", size = 699218, upload-time = "2025-08-07T13:45:30.969Z" },
- { url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" },
- { url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" },
- { url = "https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" },
- { url = "https://files.pythonhosted.org/packages/e3/a5/6ddab2b4c112be95601c13428db1d8b6608a8b6039816f2ba09c346c08fc/greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01", size = 303425, upload-time = "2025-08-07T13:32:27.59Z" },
-]
-
-[[package]]
-name = "h11"
-version = "0.16.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
-]
-
-[[package]]
-name = "hexbytes"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7f/87/adf4635b4b8c050283d74e6db9a81496063229c9263e6acc1903ab79fbec/hexbytes-1.3.1.tar.gz", hash = "sha256:a657eebebdfe27254336f98d8af6e2236f3f83aed164b87466b6cf6c5f5a4765", size = 8633, upload-time = "2025-05-14T16:45:17.5Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8d/e0/3b31492b1c89da3c5a846680517871455b30c54738486fc57ac79a5761bd/hexbytes-1.3.1-py3-none-any.whl", hash = "sha256:da01ff24a1a9a2b1881c4b85f0e9f9b0f51b526b379ffa23832ae7899d29c2c7", size = 5074, upload-time = "2025-05-14T16:45:16.179Z" },
-]
-
-[[package]]
-name = "httpcore"
-version = "1.0.9"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "certifi" },
- { name = "h11" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
-]
-
-[[package]]
-name = "httplib2"
-version = "0.30.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pyparsing" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/5b/75/1d10a90b3411f707c10c226fa918cf4f5e0578113caa223369130f702b6b/httplib2-0.30.0.tar.gz", hash = "sha256:d5b23c11fcf8e57e00ff91b7008656af0f6242c8886fd97065c97509e4e548c5", size = 249764, upload-time = "2025-08-29T18:58:36.497Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/87/7c/f35bd530a35654ef3ff81f5e102572b8b620361659e090beb85a73a3bcc9/httplib2-0.30.0-py3-none-any.whl", hash = "sha256:d10443a2bdfe0ea5dbb17e016726146d48b574208dafd41e854cf34e7d78842c", size = 91101, upload-time = "2025-08-29T18:58:33.224Z" },
-]
-
-[[package]]
-name = "httpx"
-version = "0.28.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anyio" },
- { name = "certifi" },
- { name = "httpcore" },
- { name = "idna" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
-]
-
-[[package]]
-name = "httpx-sse"
-version = "0.4.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/6e/fa/66bd985dd0b7c109a3bcb89272ee0bfb7e2b4d06309ad7b38ff866734b2a/httpx_sse-0.4.1.tar.gz", hash = "sha256:8f44d34414bc7b21bf3602713005c5df4917884f76072479b21f68befa4ea26e", size = 12998, upload-time = "2025-06-24T13:21:05.71Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/25/0a/6269e3473b09aed2dab8aa1a600c70f31f00ae1349bee30658f7e358a159/httpx_sse-0.4.1-py3-none-any.whl", hash = "sha256:cba42174344c3a5b06f255ce65b350880f962d99ead85e776f23c6618a377a37", size = 8054, upload-time = "2025-06-24T13:21:04.772Z" },
-]
-
-[[package]]
-name = "idna"
-version = "3.10"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
-]
-
-[[package]]
-name = "iniconfig"
-version = "2.1.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
-]
-
-[[package]]
-name = "instructor"
-version = "1.10.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "aiohttp" },
- { name = "diskcache" },
- { name = "docstring-parser" },
- { name = "jinja2" },
- { name = "jiter" },
- { name = "openai" },
- { name = "pydantic" },
- { name = "pydantic-core" },
- { name = "requests" },
- { name = "rich" },
- { name = "tenacity" },
- { name = "typer" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a5/67/63c4b4d2cc3c7b4238920ad3388a6f5d67265ab7c09ee34012d6b591130e/instructor-1.10.0.tar.gz", hash = "sha256:887d33e058b913290dbf526b0096b1bb8d7ea1a07d75afecbf716161f959697b", size = 69388981, upload-time = "2025-07-18T15:28:52.386Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2c/fb/ffc1ade9779795a8dc8e2379b1bfb522161ee7df8df12722f50d348fb4ea/instructor-1.10.0-py3-none-any.whl", hash = "sha256:9c789f0fce915d5498059afb5314530c8a5b22b0283302679148ddae98f732b0", size = 119455, upload-time = "2025-07-18T15:28:48.785Z" },
-]
-
-[[package]]
-name = "ipfshttpclient"
-version = "0.7.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "multiaddr" },
- { name = "requests" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/5e/c2/179b22ebf1ba5a4ea788fa72c65e4aebec4bf7b0b5e5f7621d938a311f0c/ipfshttpclient-0.7.0.tar.gz", hash = "sha256:feb1033c14c3ac87ee81264176c5beefeaf386385804427160466117ccc43693", size = 111742, upload-time = "2021-03-15T10:33:39.776Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/54/c8/0cbde4f343764848485298a45d1ab603a888f0162d5320cce8fc761a0dcd/ipfshttpclient-0.7.0-py3-none-any.whl", hash = "sha256:161c348e91cdc194c06c8725446a51a2d758ff2cc5ea97ec98f49e2af2465405", size = 82698, upload-time = "2021-03-15T10:33:37.324Z" },
-]
-
-[[package]]
-name = "isort"
-version = "5.12.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a9/c4/dc00e42c158fc4dda2afebe57d2e948805c06d5169007f1724f0683010a9/isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", size = 174643, upload-time = "2023-01-28T17:10:22.636Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0a/63/4036ae70eea279c63e2304b91ee0ac182f467f24f86394ecfe726092340b/isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6", size = 91198, upload-time = "2023-01-28T17:10:21.149Z" },
-]
-
-[[package]]
-name = "jinja2"
-version = "3.1.6"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markupsafe" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
-]
-
-[[package]]
-name = "jiter"
-version = "0.10.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ee/9d/ae7ddb4b8ab3fb1b51faf4deb36cb48a4fbbd7cb36bad6a5fca4741306f7/jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500", size = 162759, upload-time = "2025-05-18T19:04:59.73Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/6d/b5/348b3313c58f5fbfb2194eb4d07e46a35748ba6e5b3b3046143f3040bafa/jiter-0.10.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1e274728e4a5345a6dde2d343c8da018b9d4bd4350f5a472fa91f66fda44911b", size = 312262, upload-time = "2025-05-18T19:03:44.637Z" },
- { url = "https://files.pythonhosted.org/packages/9c/4a/6a2397096162b21645162825f058d1709a02965606e537e3304b02742e9b/jiter-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7202ae396446c988cb2a5feb33a543ab2165b786ac97f53b59aafb803fef0744", size = 320124, upload-time = "2025-05-18T19:03:46.341Z" },
- { url = "https://files.pythonhosted.org/packages/2a/85/1ce02cade7516b726dd88f59a4ee46914bf79d1676d1228ef2002ed2f1c9/jiter-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ba7722d6748b6920ed02a8f1726fb4b33e0fd2f3f621816a8b486c66410ab2", size = 345330, upload-time = "2025-05-18T19:03:47.596Z" },
- { url = "https://files.pythonhosted.org/packages/75/d0/bb6b4f209a77190ce10ea8d7e50bf3725fc16d3372d0a9f11985a2b23eff/jiter-0.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:371eab43c0a288537d30e1f0b193bc4eca90439fc08a022dd83e5e07500ed026", size = 369670, upload-time = "2025-05-18T19:03:49.334Z" },
- { url = "https://files.pythonhosted.org/packages/a0/f5/a61787da9b8847a601e6827fbc42ecb12be2c925ced3252c8ffcb56afcaf/jiter-0.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c675736059020365cebc845a820214765162728b51ab1e03a1b7b3abb70f74c", size = 489057, upload-time = "2025-05-18T19:03:50.66Z" },
- { url = "https://files.pythonhosted.org/packages/12/e4/6f906272810a7b21406c760a53aadbe52e99ee070fc5c0cb191e316de30b/jiter-0.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c5867d40ab716e4684858e4887489685968a47e3ba222e44cde6e4a2154f959", size = 389372, upload-time = "2025-05-18T19:03:51.98Z" },
- { url = "https://files.pythonhosted.org/packages/e2/ba/77013b0b8ba904bf3762f11e0129b8928bff7f978a81838dfcc958ad5728/jiter-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395bb9a26111b60141757d874d27fdea01b17e8fac958b91c20128ba8f4acc8a", size = 352038, upload-time = "2025-05-18T19:03:53.703Z" },
- { url = "https://files.pythonhosted.org/packages/67/27/c62568e3ccb03368dbcc44a1ef3a423cb86778a4389e995125d3d1aaa0a4/jiter-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6842184aed5cdb07e0c7e20e5bdcfafe33515ee1741a6835353bb45fe5d1bd95", size = 391538, upload-time = "2025-05-18T19:03:55.046Z" },
- { url = "https://files.pythonhosted.org/packages/c0/72/0d6b7e31fc17a8fdce76164884edef0698ba556b8eb0af9546ae1a06b91d/jiter-0.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62755d1bcea9876770d4df713d82606c8c1a3dca88ff39046b85a048566d56ea", size = 523557, upload-time = "2025-05-18T19:03:56.386Z" },
- { url = "https://files.pythonhosted.org/packages/2f/09/bc1661fbbcbeb6244bd2904ff3a06f340aa77a2b94e5a7373fd165960ea3/jiter-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533efbce2cacec78d5ba73a41756beff8431dfa1694b6346ce7af3a12c42202b", size = 514202, upload-time = "2025-05-18T19:03:57.675Z" },
- { url = "https://files.pythonhosted.org/packages/1b/84/5a5d5400e9d4d54b8004c9673bbe4403928a00d28529ff35b19e9d176b19/jiter-0.10.0-cp312-cp312-win32.whl", hash = "sha256:8be921f0cadd245e981b964dfbcd6fd4bc4e254cdc069490416dd7a2632ecc01", size = 211781, upload-time = "2025-05-18T19:03:59.025Z" },
- { url = "https://files.pythonhosted.org/packages/9b/52/7ec47455e26f2d6e5f2ea4951a0652c06e5b995c291f723973ae9e724a65/jiter-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7c7d785ae9dda68c2678532a5a1581347e9c15362ae9f6e68f3fdbfb64f2e49", size = 206176, upload-time = "2025-05-18T19:04:00.305Z" },
- { url = "https://files.pythonhosted.org/packages/2e/b0/279597e7a270e8d22623fea6c5d4eeac328e7d95c236ed51a2b884c54f70/jiter-0.10.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0588107ec8e11b6f5ef0e0d656fb2803ac6cf94a96b2b9fc675c0e3ab5e8644", size = 311617, upload-time = "2025-05-18T19:04:02.078Z" },
- { url = "https://files.pythonhosted.org/packages/91/e3/0916334936f356d605f54cc164af4060e3e7094364add445a3bc79335d46/jiter-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cafc4628b616dc32530c20ee53d71589816cf385dd9449633e910d596b1f5c8a", size = 318947, upload-time = "2025-05-18T19:04:03.347Z" },
- { url = "https://files.pythonhosted.org/packages/6a/8e/fd94e8c02d0e94539b7d669a7ebbd2776e51f329bb2c84d4385e8063a2ad/jiter-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:520ef6d981172693786a49ff5b09eda72a42e539f14788124a07530f785c3ad6", size = 344618, upload-time = "2025-05-18T19:04:04.709Z" },
- { url = "https://files.pythonhosted.org/packages/6f/b0/f9f0a2ec42c6e9c2e61c327824687f1e2415b767e1089c1d9135f43816bd/jiter-0.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:554dedfd05937f8fc45d17ebdf298fe7e0c77458232bcb73d9fbbf4c6455f5b3", size = 368829, upload-time = "2025-05-18T19:04:06.912Z" },
- { url = "https://files.pythonhosted.org/packages/e8/57/5bbcd5331910595ad53b9fd0c610392ac68692176f05ae48d6ce5c852967/jiter-0.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bc299da7789deacf95f64052d97f75c16d4fc8c4c214a22bf8d859a4288a1c2", size = 491034, upload-time = "2025-05-18T19:04:08.222Z" },
- { url = "https://files.pythonhosted.org/packages/9b/be/c393df00e6e6e9e623a73551774449f2f23b6ec6a502a3297aeeece2c65a/jiter-0.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5161e201172de298a8a1baad95eb85db4fb90e902353b1f6a41d64ea64644e25", size = 388529, upload-time = "2025-05-18T19:04:09.566Z" },
- { url = "https://files.pythonhosted.org/packages/42/3e/df2235c54d365434c7f150b986a6e35f41ebdc2f95acea3036d99613025d/jiter-0.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2227db6ba93cb3e2bf67c87e594adde0609f146344e8207e8730364db27041", size = 350671, upload-time = "2025-05-18T19:04:10.98Z" },
- { url = "https://files.pythonhosted.org/packages/c6/77/71b0b24cbcc28f55ab4dbfe029f9a5b73aeadaba677843fc6dc9ed2b1d0a/jiter-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15acb267ea5e2c64515574b06a8bf393fbfee6a50eb1673614aa45f4613c0cca", size = 390864, upload-time = "2025-05-18T19:04:12.722Z" },
- { url = "https://files.pythonhosted.org/packages/6a/d3/ef774b6969b9b6178e1d1e7a89a3bd37d241f3d3ec5f8deb37bbd203714a/jiter-0.10.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:901b92f2e2947dc6dfcb52fd624453862e16665ea909a08398dde19c0731b7f4", size = 522989, upload-time = "2025-05-18T19:04:14.261Z" },
- { url = "https://files.pythonhosted.org/packages/0c/41/9becdb1d8dd5d854142f45a9d71949ed7e87a8e312b0bede2de849388cb9/jiter-0.10.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d0cb9a125d5a3ec971a094a845eadde2db0de85b33c9f13eb94a0c63d463879e", size = 513495, upload-time = "2025-05-18T19:04:15.603Z" },
- { url = "https://files.pythonhosted.org/packages/9c/36/3468e5a18238bdedae7c4d19461265b5e9b8e288d3f86cd89d00cbb48686/jiter-0.10.0-cp313-cp313-win32.whl", hash = "sha256:48a403277ad1ee208fb930bdf91745e4d2d6e47253eedc96e2559d1e6527006d", size = 211289, upload-time = "2025-05-18T19:04:17.541Z" },
- { url = "https://files.pythonhosted.org/packages/7e/07/1c96b623128bcb913706e294adb5f768fb7baf8db5e1338ce7b4ee8c78ef/jiter-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:75f9eb72ecb640619c29bf714e78c9c46c9c4eaafd644bf78577ede459f330d4", size = 205074, upload-time = "2025-05-18T19:04:19.21Z" },
- { url = "https://files.pythonhosted.org/packages/54/46/caa2c1342655f57d8f0f2519774c6d67132205909c65e9aa8255e1d7b4f4/jiter-0.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:28ed2a4c05a1f32ef0e1d24c2611330219fed727dae01789f4a335617634b1ca", size = 318225, upload-time = "2025-05-18T19:04:20.583Z" },
- { url = "https://files.pythonhosted.org/packages/43/84/c7d44c75767e18946219ba2d703a5a32ab37b0bc21886a97bc6062e4da42/jiter-0.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a4c418b1ec86a195f1ca69da8b23e8926c752b685af665ce30777233dfe070", size = 350235, upload-time = "2025-05-18T19:04:22.363Z" },
- { url = "https://files.pythonhosted.org/packages/01/16/f5a0135ccd968b480daad0e6ab34b0c7c5ba3bc447e5088152696140dcb3/jiter-0.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d7bfed2fe1fe0e4dda6ef682cee888ba444b21e7a6553e03252e4feb6cf0adca", size = 207278, upload-time = "2025-05-18T19:04:23.627Z" },
- { url = "https://files.pythonhosted.org/packages/1c/9b/1d646da42c3de6c2188fdaa15bce8ecb22b635904fc68be025e21249ba44/jiter-0.10.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:5e9251a5e83fab8d87799d3e1a46cb4b7f2919b895c6f4483629ed2446f66522", size = 310866, upload-time = "2025-05-18T19:04:24.891Z" },
- { url = "https://files.pythonhosted.org/packages/ad/0e/26538b158e8a7c7987e94e7aeb2999e2e82b1f9d2e1f6e9874ddf71ebda0/jiter-0.10.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:023aa0204126fe5b87ccbcd75c8a0d0261b9abdbbf46d55e7ae9f8e22424eeb8", size = 318772, upload-time = "2025-05-18T19:04:26.161Z" },
- { url = "https://files.pythonhosted.org/packages/7b/fb/d302893151caa1c2636d6574d213e4b34e31fd077af6050a9c5cbb42f6fb/jiter-0.10.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c189c4f1779c05f75fc17c0c1267594ed918996a231593a21a5ca5438445216", size = 344534, upload-time = "2025-05-18T19:04:27.495Z" },
- { url = "https://files.pythonhosted.org/packages/01/d8/5780b64a149d74e347c5128d82176eb1e3241b1391ac07935693466d6219/jiter-0.10.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15720084d90d1098ca0229352607cd68256c76991f6b374af96f36920eae13c4", size = 369087, upload-time = "2025-05-18T19:04:28.896Z" },
- { url = "https://files.pythonhosted.org/packages/e8/5b/f235a1437445160e777544f3ade57544daf96ba7e96c1a5b24a6f7ac7004/jiter-0.10.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f2fb68e5f1cfee30e2b2a09549a00683e0fde4c6a2ab88c94072fc33cb7426", size = 490694, upload-time = "2025-05-18T19:04:30.183Z" },
- { url = "https://files.pythonhosted.org/packages/85/a9/9c3d4617caa2ff89cf61b41e83820c27ebb3f7b5fae8a72901e8cd6ff9be/jiter-0.10.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce541693355fc6da424c08b7edf39a2895f58d6ea17d92cc2b168d20907dee12", size = 388992, upload-time = "2025-05-18T19:04:32.028Z" },
- { url = "https://files.pythonhosted.org/packages/68/b1/344fd14049ba5c94526540af7eb661871f9c54d5f5601ff41a959b9a0bbd/jiter-0.10.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31c50c40272e189d50006ad5c73883caabb73d4e9748a688b216e85a9a9ca3b9", size = 351723, upload-time = "2025-05-18T19:04:33.467Z" },
- { url = "https://files.pythonhosted.org/packages/41/89/4c0e345041186f82a31aee7b9d4219a910df672b9fef26f129f0cda07a29/jiter-0.10.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fa3402a2ff9815960e0372a47b75c76979d74402448509ccd49a275fa983ef8a", size = 392215, upload-time = "2025-05-18T19:04:34.827Z" },
- { url = "https://files.pythonhosted.org/packages/55/58/ee607863e18d3f895feb802154a2177d7e823a7103f000df182e0f718b38/jiter-0.10.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:1956f934dca32d7bb647ea21d06d93ca40868b505c228556d3373cbd255ce853", size = 522762, upload-time = "2025-05-18T19:04:36.19Z" },
- { url = "https://files.pythonhosted.org/packages/15/d0/9123fb41825490d16929e73c212de9a42913d68324a8ce3c8476cae7ac9d/jiter-0.10.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:fcedb049bdfc555e261d6f65a6abe1d5ad68825b7202ccb9692636c70fcced86", size = 513427, upload-time = "2025-05-18T19:04:37.544Z" },
- { url = "https://files.pythonhosted.org/packages/d8/b3/2bd02071c5a2430d0b70403a34411fc519c2f227da7b03da9ba6a956f931/jiter-0.10.0-cp314-cp314-win32.whl", hash = "sha256:ac509f7eccca54b2a29daeb516fb95b6f0bd0d0d8084efaf8ed5dfc7b9f0b357", size = 210127, upload-time = "2025-05-18T19:04:38.837Z" },
- { url = "https://files.pythonhosted.org/packages/03/0c/5fe86614ea050c3ecd728ab4035534387cd41e7c1855ef6c031f1ca93e3f/jiter-0.10.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5ed975b83a2b8639356151cef5c0d597c68376fc4922b45d0eb384ac058cfa00", size = 318527, upload-time = "2025-05-18T19:04:40.612Z" },
- { url = "https://files.pythonhosted.org/packages/b3/4a/4175a563579e884192ba6e81725fc0448b042024419be8d83aa8a80a3f44/jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5", size = 354213, upload-time = "2025-05-18T19:04:41.894Z" },
-]
-
-[[package]]
-name = "joblib"
-version = "1.5.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e8/5d/447af5ea094b9e4c4054f82e223ada074c552335b9b4b2d14bd9b35a67c4/joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55", size = 331077, upload-time = "2025-08-27T12:15:46.575Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1e/e8/685f47e0d754320684db4425a0967f7d3fa70126bffd76110b7009a0090f/joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241", size = 308396, upload-time = "2025-08-27T12:15:45.188Z" },
-]
-
-[[package]]
-name = "jsonpatch"
-version = "1.33"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "jsonpointer" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/42/78/18813351fe5d63acad16aec57f94ec2b70a09e53ca98145589e185423873/jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c", size = 21699, upload-time = "2023-06-26T12:07:29.144Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" },
-]
-
-[[package]]
-name = "jsonpointer"
-version = "3.0.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" },
-]
-
-[[package]]
-name = "langchain"
-version = "0.3.26"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "langchain-core" },
- { name = "langchain-text-splitters" },
- { name = "langsmith" },
- { name = "pydantic" },
- { name = "pyyaml" },
- { name = "requests" },
- { name = "sqlalchemy" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/7f/13/a9931800ee42bbe0f8850dd540de14e80dda4945e7ee36e20b5d5964286e/langchain-0.3.26.tar.gz", hash = "sha256:8ff034ee0556d3e45eff1f1e96d0d745ced57858414dba7171c8ebdbeb5580c9", size = 10226808, upload-time = "2025-06-20T22:23:01.174Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f1/f2/c09a2e383283e3af1db669ab037ac05a45814f4b9c472c48dc24c0cef039/langchain-0.3.26-py3-none-any.whl", hash = "sha256:361bb2e61371024a8c473da9f9c55f4ee50f269c5ab43afdb2b1309cb7ac36cf", size = 1012336, upload-time = "2025-06-20T22:22:58.874Z" },
-]
-
-[[package]]
-name = "langchain-anthropic"
-version = "0.3.19"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anthropic" },
- { name = "langchain-core" },
- { name = "pydantic" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/1f/ab/bdaefa42fdab238efff45eb28c6cd74c011979092408decdae22c0bf7e66/langchain_anthropic-0.3.19.tar.gz", hash = "sha256:e62259382586ee5c44e9a9459d00b74a7e191550e5fadfad28f0daa5d143d745", size = 281502, upload-time = "2025-08-18T18:33:36.811Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a2/69/64473db52d02715f3815df3b25c9816b5801a58762a5ae62a3e5b84169a0/langchain_anthropic-0.3.19-py3-none-any.whl", hash = "sha256:5b5372ef7e10ee32b4308b4d9e1ed623c360b7d0a233c017e5209ad8118d5ab7", size = 31775, upload-time = "2025-08-18T18:33:35.596Z" },
-]
-
-[[package]]
-name = "langchain-community"
-version = "0.3.27"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "aiohttp" },
- { name = "dataclasses-json" },
- { name = "httpx-sse" },
- { name = "langchain" },
- { name = "langchain-core" },
- { name = "langsmith" },
- { name = "numpy" },
- { name = "pydantic-settings" },
- { name = "pyyaml" },
- { name = "requests" },
- { name = "sqlalchemy" },
- { name = "tenacity" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/5c/76/200494f6de488217a196c4369e665d26b94c8c3642d46e2fd62f9daf0a3a/langchain_community-0.3.27.tar.gz", hash = "sha256:e1037c3b9da0c6d10bf06e838b034eb741e016515c79ef8f3f16e53ead33d882", size = 33237737, upload-time = "2025-07-02T18:47:02.329Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c8/bc/f8c7dae8321d37ed39ac9d7896617c4203248240a4835b136e3724b3bb62/langchain_community-0.3.27-py3-none-any.whl", hash = "sha256:581f97b795f9633da738ea95da9cb78f8879b538090c9b7a68c0aed49c828f0d", size = 2530442, upload-time = "2025-07-02T18:47:00.246Z" },
-]
-
-[[package]]
-name = "langchain-core"
-version = "0.3.75"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "jsonpatch" },
- { name = "langsmith" },
- { name = "packaging" },
- { name = "pydantic" },
- { name = "pyyaml" },
- { name = "tenacity" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/06/63/270b71a23e849984505ddc7c5c9fd3f4bd9cb14b1a484ee44c4e51c33cc2/langchain_core-0.3.75.tar.gz", hash = "sha256:ab0eb95a06ed6043f76162e6086b45037690cb70b7f090bd83b5ebb8a05b70ed", size = 570876, upload-time = "2025-08-26T15:24:12.246Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/fb/42/0d0221cce6f168f644d7d96cb6c87c4e42fc55d2941da7a36e970e3ab8ab/langchain_core-0.3.75-py3-none-any.whl", hash = "sha256:03ca1fadf955ee3c7d5806a841f4b3a37b816acea5e61a7e6ba1298c05eea7f5", size = 443986, upload-time = "2025-08-26T15:24:10.883Z" },
-]
-
-[[package]]
-name = "langchain-openai"
-version = "0.3.28"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "langchain-core" },
- { name = "openai" },
- { name = "tiktoken" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/6b/1d/90cd764c62d5eb822113d3debc3abe10c8807d2c0af90917bfe09acd6f86/langchain_openai-0.3.28.tar.gz", hash = "sha256:6c669548dbdea325c034ae5ef699710e2abd054c7354fdb3ef7bf909dc739d9e", size = 753951, upload-time = "2025-07-14T10:50:44.076Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/91/56/75f3d84b69b8bdae521a537697375e1241377627c32b78edcae337093502/langchain_openai-0.3.28-py3-none-any.whl", hash = "sha256:4cd6d80a5b2ae471a168017bc01b2e0f01548328d83532400a001623624ede67", size = 70571, upload-time = "2025-07-14T10:50:42.492Z" },
-]
-
-[[package]]
-name = "langchain-text-splitters"
-version = "0.3.11"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "langchain-core" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/11/43/dcda8fd25f0b19cb2835f2f6bb67f26ad58634f04ac2d8eae00526b0fa55/langchain_text_splitters-0.3.11.tar.gz", hash = "sha256:7a50a04ada9a133bbabb80731df7f6ddac51bc9f1b9cab7fa09304d71d38a6cc", size = 46458, upload-time = "2025-08-31T23:02:58.316Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/58/0d/41a51b40d24ff0384ec4f7ab8dd3dcea8353c05c973836b5e289f1465d4f/langchain_text_splitters-0.3.11-py3-none-any.whl", hash = "sha256:cf079131166a487f1372c8ab5d0bfaa6c0a4291733d9c43a34a16ac9bcd6a393", size = 33845, upload-time = "2025-08-31T23:02:57.195Z" },
-]
-
-[[package]]
-name = "langgraph"
-version = "0.6.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "langchain-core" },
- { name = "langgraph-checkpoint" },
- { name = "langgraph-prebuilt" },
- { name = "langgraph-sdk" },
- { name = "pydantic" },
- { name = "xxhash" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/56/85/36feb25062da40ca395f6c44d0232a672842e5421885101f6faf4670b670/langgraph-0.6.7.tar.gz", hash = "sha256:ba7fd17b8220142d6a4269b6038f2b3dcbcef42cd5ecf4a4c8d9b60b010830a6", size = 465534, upload-time = "2025-09-07T16:49:42.895Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/67/06/f440922a58204dbfd10f7fdda0de0325529a159e9dc3d1038afe4b431a49/langgraph-0.6.7-py3-none-any.whl", hash = "sha256:c724dd8c24806b70faf4903e8e20c0234f8c0a356e0e96a88035cbecca9df2cf", size = 153329, upload-time = "2025-09-07T16:49:40.45Z" },
-]
-
-[[package]]
-name = "langgraph-checkpoint"
-version = "2.1.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "langchain-core" },
- { name = "ormsgpack" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/73/3e/d00eb2b56c3846a0cabd2e5aa71c17a95f882d4f799a6ffe96a19b55eba9/langgraph_checkpoint-2.1.1.tar.gz", hash = "sha256:72038c0f9e22260cb9bff1f3ebe5eb06d940b7ee5c1e4765019269d4f21cf92d", size = 136256, upload-time = "2025-07-17T13:07:52.411Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4c/dd/64686797b0927fb18b290044be12ae9d4df01670dce6bb2498d5ab65cb24/langgraph_checkpoint-2.1.1-py3-none-any.whl", hash = "sha256:5a779134fd28134a9a83d078be4450bbf0e0c79fdf5e992549658899e6fc5ea7", size = 43925, upload-time = "2025-07-17T13:07:51.023Z" },
-]
-
-[[package]]
-name = "langgraph-prebuilt"
-version = "0.6.4"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "langchain-core" },
- { name = "langgraph-checkpoint" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/d6/21/9b198d11732101ee8cdf30af98d0b4f11254c768de15173e57f5260fd14b/langgraph_prebuilt-0.6.4.tar.gz", hash = "sha256:e9e53b906ee5df46541d1dc5303239e815d3ec551e52bb03dd6463acc79ec28f", size = 125695, upload-time = "2025-08-07T18:17:57.333Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0a/7f/973b0d9729d9693d6e5b4bc5f3ae41138d194cb7b16b0ed230020beeb13a/langgraph_prebuilt-0.6.4-py3-none-any.whl", hash = "sha256:819f31d88b84cb2729ff1b79db2d51e9506b8fb7aaacfc0d359d4fe16e717344", size = 28025, upload-time = "2025-08-07T18:17:56.493Z" },
-]
-
-[[package]]
-name = "langgraph-sdk"
-version = "0.2.6"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "httpx" },
- { name = "orjson" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/55/35/a1caf4fdb725adec30f1e9562f218524a92d8b675deb97be653687f086ee/langgraph_sdk-0.2.6.tar.gz", hash = "sha256:7db27cd86d1231fa614823ff416fcd2541b5565ad78ae950f31ae96d7af7c519", size = 80346, upload-time = "2025-09-04T01:51:11.262Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c6/d2/c5fac919601b27a0af5df0bde46e7f1361d5e04505e404b75bed45d21fc8/langgraph_sdk-0.2.6-py3-none-any.whl", hash = "sha256:477216b573b8177bbd849f4c754782a81279fbbd88bfadfeda44422d14b18b08", size = 54565, upload-time = "2025-09-04T01:51:10.044Z" },
-]
-
-[[package]]
-name = "langmem"
-version = "0.0.29"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "langchain" },
- { name = "langchain-anthropic" },
- { name = "langchain-core" },
- { name = "langchain-openai" },
- { name = "langgraph" },
- { name = "langgraph-checkpoint" },
- { name = "langsmith" },
- { name = "trustcall" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ec/75/a58f56a1f635003919f1c5c356a4247d8136d9183b63b9f52599aa7a8710/langmem-0.0.29.tar.gz", hash = "sha256:9a4a7bfcbde87f02494caf6add55c0cdd49c5a1a6396e19fe12a56ba6fb96267", size = 206315, upload-time = "2025-07-28T19:55:33.437Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f5/6a/ea17974afc18dbf278bbfaaa1331e3dfef979cf42bfae1dc695b5e4ea750/langmem-0.0.29-py3-none-any.whl", hash = "sha256:3e0b56d3e4077e96dab45616e2800c9550bf61c1e1eee4c119ec704518037d8c", size = 67127, upload-time = "2025-07-28T19:55:32.279Z" },
-]
-
-[[package]]
-name = "langsmith"
-version = "0.4.27"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "httpx" },
- { name = "orjson", marker = "platform_python_implementation != 'PyPy'" },
- { name = "packaging" },
- { name = "pydantic" },
- { name = "requests" },
- { name = "requests-toolbelt" },
- { name = "zstandard" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/62/6f/7d88228b7614fa0204e58b8b8c46e6f564659ee07a525c8aeae77a05598a/langsmith-0.4.27.tar.gz", hash = "sha256:6e8bbc425797202952d4e849431e6276e7985b44536ec0582eb96eaf9129c393", size = 956062, upload-time = "2025-09-08T19:01:49.677Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2d/26/99bc52e1c47fb4b995aece85a5313349a5e2559e4143ee2345d8bd1446ff/langsmith-0.4.27-py3-none-any.whl", hash = "sha256:23708e6478d1c74ac0e428bbc92df6704993e34305fb62a0c64d2fefc35bd67f", size = 384752, upload-time = "2025-09-08T19:01:47.362Z" },
-]
-
-[[package]]
-name = "lxml"
-version = "6.0.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/8f/bd/f9d01fd4132d81c6f43ab01983caea69ec9614b913c290a26738431a015d/lxml-6.0.1.tar.gz", hash = "sha256:2b3a882ebf27dd026df3801a87cf49ff791336e0f94b0fad195db77e01240690", size = 4070214, upload-time = "2025-08-22T10:37:53.525Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b0/a9/82b244c8198fcdf709532e39a1751943a36b3e800b420adc739d751e0299/lxml-6.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:c03ac546adaabbe0b8e4a15d9ad815a281afc8d36249c246aecf1aaad7d6f200", size = 8422788, upload-time = "2025-08-22T10:32:56.612Z" },
- { url = "https://files.pythonhosted.org/packages/c9/8d/1ed2bc20281b0e7ed3e6c12b0a16e64ae2065d99be075be119ba88486e6d/lxml-6.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:33b862c7e3bbeb4ba2c96f3a039f925c640eeba9087a4dc7a572ec0f19d89392", size = 4593547, upload-time = "2025-08-22T10:32:59.016Z" },
- { url = "https://files.pythonhosted.org/packages/76/53/d7fd3af95b72a3493bf7fbe842a01e339d8f41567805cecfecd5c71aa5ee/lxml-6.0.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7a3ec1373f7d3f519de595032d4dcafae396c29407cfd5073f42d267ba32440d", size = 4948101, upload-time = "2025-08-22T10:33:00.765Z" },
- { url = "https://files.pythonhosted.org/packages/9d/51/4e57cba4d55273c400fb63aefa2f0d08d15eac021432571a7eeefee67bed/lxml-6.0.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:03b12214fb1608f4cffa181ec3d046c72f7e77c345d06222144744c122ded870", size = 5108090, upload-time = "2025-08-22T10:33:03.108Z" },
- { url = "https://files.pythonhosted.org/packages/f6/6e/5f290bc26fcc642bc32942e903e833472271614e24d64ad28aaec09d5dae/lxml-6.0.1-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:207ae0d5f0f03b30f95e649a6fa22aa73f5825667fee9c7ec6854d30e19f2ed8", size = 5021791, upload-time = "2025-08-22T10:33:06.972Z" },
- { url = "https://files.pythonhosted.org/packages/13/d4/2e7551a86992ece4f9a0f6eebd4fb7e312d30f1e372760e2109e721d4ce6/lxml-6.0.1-cp312-cp312-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:32297b09ed4b17f7b3f448de87a92fb31bb8747496623483788e9f27c98c0f00", size = 5358861, upload-time = "2025-08-22T10:33:08.967Z" },
- { url = "https://files.pythonhosted.org/packages/8a/5f/cb49d727fc388bf5fd37247209bab0da11697ddc5e976ccac4826599939e/lxml-6.0.1-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7e18224ea241b657a157c85e9cac82c2b113ec90876e01e1f127312006233756", size = 5652569, upload-time = "2025-08-22T10:33:10.815Z" },
- { url = "https://files.pythonhosted.org/packages/ca/b8/66c1ef8c87ad0f958b0a23998851e610607c74849e75e83955d5641272e6/lxml-6.0.1-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a07a994d3c46cd4020c1ea566345cf6815af205b1e948213a4f0f1d392182072", size = 5252262, upload-time = "2025-08-22T10:33:12.673Z" },
- { url = "https://files.pythonhosted.org/packages/1a/ef/131d3d6b9590e64fdbb932fbc576b81fcc686289da19c7cb796257310e82/lxml-6.0.1-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:2287fadaa12418a813b05095485c286c47ea58155930cfbd98c590d25770e225", size = 4710309, upload-time = "2025-08-22T10:33:14.952Z" },
- { url = "https://files.pythonhosted.org/packages/bc/3f/07f48ae422dce44902309aa7ed386c35310929dc592439c403ec16ef9137/lxml-6.0.1-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b4e597efca032ed99f418bd21314745522ab9fa95af33370dcee5533f7f70136", size = 5265786, upload-time = "2025-08-22T10:33:16.721Z" },
- { url = "https://files.pythonhosted.org/packages/11/c7/125315d7b14ab20d9155e8316f7d287a4956098f787c22d47560b74886c4/lxml-6.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9696d491f156226decdd95d9651c6786d43701e49f32bf23715c975539aa2b3b", size = 5062272, upload-time = "2025-08-22T10:33:18.478Z" },
- { url = "https://files.pythonhosted.org/packages/8b/c3/51143c3a5fc5168a7c3ee626418468ff20d30f5a59597e7b156c1e61fba8/lxml-6.0.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e4e3cd3585f3c6f87cdea44cda68e692cc42a012f0131d25957ba4ce755241a7", size = 4786955, upload-time = "2025-08-22T10:33:20.34Z" },
- { url = "https://files.pythonhosted.org/packages/11/86/73102370a420ec4529647b31c4a8ce8c740c77af3a5fae7a7643212d6f6e/lxml-6.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:45cbc92f9d22c28cd3b97f8d07fcefa42e569fbd587dfdac76852b16a4924277", size = 5673557, upload-time = "2025-08-22T10:33:22.282Z" },
- { url = "https://files.pythonhosted.org/packages/d7/2d/aad90afaec51029aef26ef773b8fd74a9e8706e5e2f46a57acd11a421c02/lxml-6.0.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:f8c9bcfd2e12299a442fba94459adf0b0d001dbc68f1594439bfa10ad1ecb74b", size = 5254211, upload-time = "2025-08-22T10:33:24.15Z" },
- { url = "https://files.pythonhosted.org/packages/63/01/c9e42c8c2d8b41f4bdefa42ab05448852e439045f112903dd901b8fbea4d/lxml-6.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1e9dc2b9f1586e7cd77753eae81f8d76220eed9b768f337dc83a3f675f2f0cf9", size = 5275817, upload-time = "2025-08-22T10:33:26.007Z" },
- { url = "https://files.pythonhosted.org/packages/bc/1f/962ea2696759abe331c3b0e838bb17e92224f39c638c2068bf0d8345e913/lxml-6.0.1-cp312-cp312-win32.whl", hash = "sha256:987ad5c3941c64031f59c226167f55a04d1272e76b241bfafc968bdb778e07fb", size = 3610889, upload-time = "2025-08-22T10:33:28.169Z" },
- { url = "https://files.pythonhosted.org/packages/41/e2/22c86a990b51b44442b75c43ecb2f77b8daba8c4ba63696921966eac7022/lxml-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:abb05a45394fd76bf4a60c1b7bec0e6d4e8dfc569fc0e0b1f634cd983a006ddc", size = 4010925, upload-time = "2025-08-22T10:33:29.874Z" },
- { url = "https://files.pythonhosted.org/packages/b2/21/dc0c73325e5eb94ef9c9d60dbb5dcdcb2e7114901ea9509735614a74e75a/lxml-6.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:c4be29bce35020d8579d60aa0a4e95effd66fcfce31c46ffddf7e5422f73a299", size = 3671922, upload-time = "2025-08-22T10:33:31.535Z" },
- { url = "https://files.pythonhosted.org/packages/43/c4/cd757eeec4548e6652eff50b944079d18ce5f8182d2b2cf514e125e8fbcb/lxml-6.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:485eda5d81bb7358db96a83546949c5fe7474bec6c68ef3fa1fb61a584b00eea", size = 8405139, upload-time = "2025-08-22T10:33:34.09Z" },
- { url = "https://files.pythonhosted.org/packages/ff/99/0290bb86a7403893f5e9658490c705fcea103b9191f2039752b071b4ef07/lxml-6.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d12160adea318ce3d118f0b4fbdff7d1225c75fb7749429541b4d217b85c3f76", size = 4585954, upload-time = "2025-08-22T10:33:36.294Z" },
- { url = "https://files.pythonhosted.org/packages/88/a7/4bb54dd1e626342a0f7df6ec6ca44fdd5d0e100ace53acc00e9a689ead04/lxml-6.0.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48c8d335d8ab72f9265e7ba598ae5105a8272437403f4032107dbcb96d3f0b29", size = 4944052, upload-time = "2025-08-22T10:33:38.19Z" },
- { url = "https://files.pythonhosted.org/packages/71/8d/20f51cd07a7cbef6214675a8a5c62b2559a36d9303fe511645108887c458/lxml-6.0.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:405e7cf9dbdbb52722c231e0f1257214202dfa192327fab3de45fd62e0554082", size = 5098885, upload-time = "2025-08-22T10:33:40.035Z" },
- { url = "https://files.pythonhosted.org/packages/5a/63/efceeee7245d45f97d548e48132258a36244d3c13c6e3ddbd04db95ff496/lxml-6.0.1-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:299a790d403335a6a057ade46f92612ebab87b223e4e8c5308059f2dc36f45ed", size = 5017542, upload-time = "2025-08-22T10:33:41.896Z" },
- { url = "https://files.pythonhosted.org/packages/57/5d/92cb3d3499f5caba17f7933e6be3b6c7de767b715081863337ced42eb5f2/lxml-6.0.1-cp313-cp313-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:48da704672f6f9c461e9a73250440c647638cc6ff9567ead4c3b1f189a604ee8", size = 5347303, upload-time = "2025-08-22T10:33:43.868Z" },
- { url = "https://files.pythonhosted.org/packages/69/f8/606fa16a05d7ef5e916c6481c634f40870db605caffed9d08b1a4fb6b989/lxml-6.0.1-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:21e364e1bb731489e3f4d51db416f991a5d5da5d88184728d80ecfb0904b1d68", size = 5641055, upload-time = "2025-08-22T10:33:45.784Z" },
- { url = "https://files.pythonhosted.org/packages/b3/01/15d5fc74ebb49eac4e5df031fbc50713dcc081f4e0068ed963a510b7d457/lxml-6.0.1-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1bce45a2c32032afddbd84ed8ab092130649acb935536ef7a9559636ce7ffd4a", size = 5242719, upload-time = "2025-08-22T10:33:48.089Z" },
- { url = "https://files.pythonhosted.org/packages/42/a5/1b85e2aaaf8deaa67e04c33bddb41f8e73d07a077bf9db677cec7128bfb4/lxml-6.0.1-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:fa164387ff20ab0e575fa909b11b92ff1481e6876835014e70280769920c4433", size = 4717310, upload-time = "2025-08-22T10:33:49.852Z" },
- { url = "https://files.pythonhosted.org/packages/42/23/f3bb1292f55a725814317172eeb296615db3becac8f1a059b53c51fc1da8/lxml-6.0.1-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7587ac5e000e1594e62278422c5783b34a82b22f27688b1074d71376424b73e8", size = 5254024, upload-time = "2025-08-22T10:33:52.22Z" },
- { url = "https://files.pythonhosted.org/packages/b4/be/4d768f581ccd0386d424bac615d9002d805df7cc8482ae07d529f60a3c1e/lxml-6.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:57478424ac4c9170eabf540237125e8d30fad1940648924c058e7bc9fb9cf6dd", size = 5055335, upload-time = "2025-08-22T10:33:54.041Z" },
- { url = "https://files.pythonhosted.org/packages/40/07/ed61d1a3e77d1a9f856c4fab15ee5c09a2853fb7af13b866bb469a3a6d42/lxml-6.0.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:09c74afc7786c10dd6afaa0be2e4805866beadc18f1d843cf517a7851151b499", size = 4784864, upload-time = "2025-08-22T10:33:56.382Z" },
- { url = "https://files.pythonhosted.org/packages/01/37/77e7971212e5c38a55431744f79dff27fd751771775165caea096d055ca4/lxml-6.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7fd70681aeed83b196482d42a9b0dc5b13bab55668d09ad75ed26dff3be5a2f5", size = 5657173, upload-time = "2025-08-22T10:33:58.698Z" },
- { url = "https://files.pythonhosted.org/packages/32/a3/e98806d483941cd9061cc838b1169626acef7b2807261fbe5e382fcef881/lxml-6.0.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:10a72e456319b030b3dd900df6b1f19d89adf06ebb688821636dc406788cf6ac", size = 5245896, upload-time = "2025-08-22T10:34:00.586Z" },
- { url = "https://files.pythonhosted.org/packages/07/de/9bb5a05e42e8623bf06b4638931ea8c8f5eb5a020fe31703abdbd2e83547/lxml-6.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b0fa45fb5f55111ce75b56c703843b36baaf65908f8b8d2fbbc0e249dbc127ed", size = 5267417, upload-time = "2025-08-22T10:34:02.719Z" },
- { url = "https://files.pythonhosted.org/packages/f2/43/c1cb2a7c67226266c463ef8a53b82d42607228beb763b5fbf4867e88a21f/lxml-6.0.1-cp313-cp313-win32.whl", hash = "sha256:01dab65641201e00c69338c9c2b8a0f2f484b6b3a22d10779bb417599fae32b5", size = 3610051, upload-time = "2025-08-22T10:34:04.553Z" },
- { url = "https://files.pythonhosted.org/packages/34/96/6a6c3b8aa480639c1a0b9b6faf2a63fb73ab79ffcd2a91cf28745faa22de/lxml-6.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:bdf8f7c8502552d7bff9e4c98971910a0a59f60f88b5048f608d0a1a75e94d1c", size = 4009325, upload-time = "2025-08-22T10:34:06.24Z" },
- { url = "https://files.pythonhosted.org/packages/8c/66/622e8515121e1fd773e3738dae71b8df14b12006d9fb554ce90886689fd0/lxml-6.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:a6aeca75959426b9fd8d4782c28723ba224fe07cfa9f26a141004210528dcbe2", size = 3670443, upload-time = "2025-08-22T10:34:07.974Z" },
- { url = "https://files.pythonhosted.org/packages/38/e3/b7eb612ce07abe766918a7e581ec6a0e5212352194001fd287c3ace945f0/lxml-6.0.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:29b0e849ec7030e3ecb6112564c9f7ad6881e3b2375dd4a0c486c5c1f3a33859", size = 8426160, upload-time = "2025-08-22T10:34:10.154Z" },
- { url = "https://files.pythonhosted.org/packages/35/8f/ab3639a33595cf284fe733c6526da2ca3afbc5fd7f244ae67f3303cec654/lxml-6.0.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:02a0f7e629f73cc0be598c8b0611bf28ec3b948c549578a26111b01307fd4051", size = 4589288, upload-time = "2025-08-22T10:34:12.972Z" },
- { url = "https://files.pythonhosted.org/packages/2c/65/819d54f2e94d5c4458c1db8c1ccac9d05230b27c1038937d3d788eb406f9/lxml-6.0.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:beab5e54de016e730875f612ba51e54c331e2fa6dc78ecf9a5415fc90d619348", size = 4964523, upload-time = "2025-08-22T10:34:15.474Z" },
- { url = "https://files.pythonhosted.org/packages/5b/4a/d4a74ce942e60025cdaa883c5a4478921a99ce8607fc3130f1e349a83b28/lxml-6.0.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:92a08aefecd19ecc4ebf053c27789dd92c87821df2583a4337131cf181a1dffa", size = 5101108, upload-time = "2025-08-22T10:34:17.348Z" },
- { url = "https://files.pythonhosted.org/packages/cb/48/67f15461884074edd58af17b1827b983644d1fae83b3d909e9045a08b61e/lxml-6.0.1-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36c8fa7e177649470bc3dcf7eae6bee1e4984aaee496b9ccbf30e97ac4127fa2", size = 5053498, upload-time = "2025-08-22T10:34:19.232Z" },
- { url = "https://files.pythonhosted.org/packages/b6/d4/ec1bf1614828a5492f4af0b6a9ee2eb3e92440aea3ac4fa158e5228b772b/lxml-6.0.1-cp314-cp314-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:5d08e0f1af6916267bb7eff21c09fa105620f07712424aaae09e8cb5dd4164d1", size = 5351057, upload-time = "2025-08-22T10:34:21.143Z" },
- { url = "https://files.pythonhosted.org/packages/65/2b/c85929dacac08821f2100cea3eb258ce5c8804a4e32b774f50ebd7592850/lxml-6.0.1-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9705cdfc05142f8c38c97a61bd3a29581ceceb973a014e302ee4a73cc6632476", size = 5671579, upload-time = "2025-08-22T10:34:23.528Z" },
- { url = "https://files.pythonhosted.org/packages/d0/36/cf544d75c269b9aad16752fd9f02d8e171c5a493ca225cb46bb7ba72868c/lxml-6.0.1-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74555e2da7c1636e30bff4e6e38d862a634cf020ffa591f1f63da96bf8b34772", size = 5250403, upload-time = "2025-08-22T10:34:25.642Z" },
- { url = "https://files.pythonhosted.org/packages/c2/e8/83dbc946ee598fd75fdeae6151a725ddeaab39bb321354a9468d4c9f44f3/lxml-6.0.1-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:e38b5f94c5a2a5dadaddd50084098dfd005e5a2a56cd200aaf5e0a20e8941782", size = 4696712, upload-time = "2025-08-22T10:34:27.753Z" },
- { url = "https://files.pythonhosted.org/packages/f4/72/889c633b47c06205743ba935f4d1f5aa4eb7f0325d701ed2b0540df1b004/lxml-6.0.1-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a5ec101a92ddacb4791977acfc86c1afd624c032974bfb6a21269d1083c9bc49", size = 5268177, upload-time = "2025-08-22T10:34:29.804Z" },
- { url = "https://files.pythonhosted.org/packages/b0/b6/f42a21a1428479b66ea0da7bd13e370436aecaff0cfe93270c7e165bd2a4/lxml-6.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:5c17e70c82fd777df586c12114bbe56e4e6f823a971814fd40dec9c0de518772", size = 5094648, upload-time = "2025-08-22T10:34:31.703Z" },
- { url = "https://files.pythonhosted.org/packages/51/b0/5f8c1e8890e2ee1c2053c2eadd1cb0e4b79e2304e2912385f6ca666f48b1/lxml-6.0.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:45fdd0415a0c3d91640b5d7a650a8f37410966a2e9afebb35979d06166fd010e", size = 4745220, upload-time = "2025-08-22T10:34:33.595Z" },
- { url = "https://files.pythonhosted.org/packages/eb/f9/820b5125660dae489ca3a21a36d9da2e75dd6b5ffe922088f94bbff3b8a0/lxml-6.0.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:d417eba28981e720a14fcb98f95e44e7a772fe25982e584db38e5d3b6ee02e79", size = 5692913, upload-time = "2025-08-22T10:34:35.482Z" },
- { url = "https://files.pythonhosted.org/packages/23/8e/a557fae9eec236618aecf9ff35fec18df41b6556d825f3ad6017d9f6e878/lxml-6.0.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:8e5d116b9e59be7934febb12c41cce2038491ec8fdb743aeacaaf36d6e7597e4", size = 5259816, upload-time = "2025-08-22T10:34:37.482Z" },
- { url = "https://files.pythonhosted.org/packages/fa/fd/b266cfaab81d93a539040be699b5854dd24c84e523a1711ee5f615aa7000/lxml-6.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c238f0d0d40fdcb695c439fe5787fa69d40f45789326b3bb6ef0d61c4b588d6e", size = 5276162, upload-time = "2025-08-22T10:34:39.507Z" },
- { url = "https://files.pythonhosted.org/packages/25/6c/6f9610fbf1de002048e80585ea4719591921a0316a8565968737d9f125ca/lxml-6.0.1-cp314-cp314-win32.whl", hash = "sha256:537b6cf1c5ab88cfd159195d412edb3e434fee880f206cbe68dff9c40e17a68a", size = 3669595, upload-time = "2025-08-22T10:34:41.783Z" },
- { url = "https://files.pythonhosted.org/packages/72/a5/506775e3988677db24dc75a7b03e04038e0b3d114ccd4bccea4ce0116c15/lxml-6.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:911d0a2bb3ef3df55b3d97ab325a9ca7e438d5112c102b8495321105d25a441b", size = 4079818, upload-time = "2025-08-22T10:34:44.04Z" },
- { url = "https://files.pythonhosted.org/packages/0a/44/9613f300201b8700215856e5edd056d4e58dd23368699196b58877d4408b/lxml-6.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:2834377b0145a471a654d699bdb3a2155312de492142ef5a1d426af2c60a0a31", size = 3753901, upload-time = "2025-08-22T10:34:45.799Z" },
-]
-
-[[package]]
-name = "mako"
-version = "1.3.10"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markupsafe" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9e/38/bd5b78a920a64d708fe6bc8e0a2c075e1389d53bef8413725c63ba041535/mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28", size = 392474, upload-time = "2025-04-10T12:44:31.16Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/87/fb/99f81ac72ae23375f22b7afdb7642aba97c00a713c217124420147681a2f/mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59", size = 78509, upload-time = "2025-04-10T12:50:53.297Z" },
-]
-
-[[package]]
-name = "markdown-it-py"
-version = "4.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "mdurl" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
-]
-
-[[package]]
-name = "markupsafe"
-version = "3.0.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" },
- { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" },
- { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" },
- { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" },
- { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" },
- { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" },
- { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" },
- { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" },
- { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" },
- { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" },
- { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" },
- { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" },
- { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" },
- { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" },
- { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" },
- { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" },
- { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" },
- { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" },
- { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" },
- { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" },
- { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" },
- { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" },
- { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" },
- { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" },
- { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" },
- { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" },
- { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" },
- { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" },
- { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" },
- { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" },
-]
-
-[[package]]
-name = "marshmallow"
-version = "3.26.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "packaging" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/5e53d26b42ab75491cda89b871dab9e97c840bf12c63ec58a1919710cd06/marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6", size = 221825, upload-time = "2025-02-03T15:32:25.093Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/34/75/51952c7b2d3873b44a0028b1bd26a25078c18f92f256608e8d1dc61b39fd/marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c", size = 50878, upload-time = "2025-02-03T15:32:22.295Z" },
-]
-
-[[package]]
-name = "mdurl"
-version = "0.1.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
-]
-
-[[package]]
-name = "multiaddr"
-version = "0.0.9"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "base58" },
- { name = "netaddr" },
- { name = "six" },
- { name = "varint" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/12/f4/fa5353022ad8e0fd364bfa8b474f9562c36ce1305fad31fe52b849e30795/multiaddr-0.0.9.tar.gz", hash = "sha256:30b2695189edc3d5b90f1c303abb8f02d963a3a4edf2e7178b975eb417ab0ecf", size = 24726, upload-time = "2019-12-23T07:06:21.146Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/51/59/df732566d951c33f00a4022fc5bf9c5d1661b1c2cdaf56e75a1a5fa8f829/multiaddr-0.0.9-py2.py3-none-any.whl", hash = "sha256:5c0f862cbcf19aada2a899f80ef896ddb2e85614e0c8f04dd287c06c69dac95b", size = 16281, upload-time = "2019-12-23T07:06:18.915Z" },
-]
-
-[[package]]
-name = "multidict"
-version = "6.6.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/69/7f/0652e6ed47ab288e3756ea9c0df8b14950781184d4bd7883f4d87dd41245/multidict-6.6.4.tar.gz", hash = "sha256:d2d4e4787672911b48350df02ed3fa3fffdc2f2e8ca06dd6afdf34189b76a9dd", size = 101843, upload-time = "2025-08-11T12:08:48.217Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/05/f6/512ffd8fd8b37fb2680e5ac35d788f1d71bbaf37789d21a820bdc441e565/multidict-6.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0ffb87be160942d56d7b87b0fdf098e81ed565add09eaa1294268c7f3caac4c8", size = 76516, upload-time = "2025-08-11T12:06:53.393Z" },
- { url = "https://files.pythonhosted.org/packages/99/58/45c3e75deb8855c36bd66cc1658007589662ba584dbf423d01df478dd1c5/multidict-6.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d191de6cbab2aff5de6c5723101705fd044b3e4c7cfd587a1929b5028b9714b3", size = 45394, upload-time = "2025-08-11T12:06:54.555Z" },
- { url = "https://files.pythonhosted.org/packages/fd/ca/e8c4472a93a26e4507c0b8e1f0762c0d8a32de1328ef72fd704ef9cc5447/multidict-6.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38a0956dd92d918ad5feff3db8fcb4a5eb7dba114da917e1a88475619781b57b", size = 43591, upload-time = "2025-08-11T12:06:55.672Z" },
- { url = "https://files.pythonhosted.org/packages/05/51/edf414f4df058574a7265034d04c935aa84a89e79ce90fcf4df211f47b16/multidict-6.6.4-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:6865f6d3b7900ae020b495d599fcf3765653bc927951c1abb959017f81ae8287", size = 237215, upload-time = "2025-08-11T12:06:57.213Z" },
- { url = "https://files.pythonhosted.org/packages/c8/45/8b3d6dbad8cf3252553cc41abea09ad527b33ce47a5e199072620b296902/multidict-6.6.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a2088c126b6f72db6c9212ad827d0ba088c01d951cee25e758c450da732c138", size = 258299, upload-time = "2025-08-11T12:06:58.946Z" },
- { url = "https://files.pythonhosted.org/packages/3c/e8/8ca2e9a9f5a435fc6db40438a55730a4bf4956b554e487fa1b9ae920f825/multidict-6.6.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0f37bed7319b848097085d7d48116f545985db988e2256b2e6f00563a3416ee6", size = 242357, upload-time = "2025-08-11T12:07:00.301Z" },
- { url = "https://files.pythonhosted.org/packages/0f/84/80c77c99df05a75c28490b2af8f7cba2a12621186e0a8b0865d8e745c104/multidict-6.6.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:01368e3c94032ba6ca0b78e7ccb099643466cf24f8dc8eefcfdc0571d56e58f9", size = 268369, upload-time = "2025-08-11T12:07:01.638Z" },
- { url = "https://files.pythonhosted.org/packages/0d/e9/920bfa46c27b05fb3e1ad85121fd49f441492dca2449c5bcfe42e4565d8a/multidict-6.6.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fe323540c255db0bffee79ad7f048c909f2ab0edb87a597e1c17da6a54e493c", size = 269341, upload-time = "2025-08-11T12:07:02.943Z" },
- { url = "https://files.pythonhosted.org/packages/af/65/753a2d8b05daf496f4a9c367fe844e90a1b2cac78e2be2c844200d10cc4c/multidict-6.6.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8eb3025f17b0a4c3cd08cda49acf312a19ad6e8a4edd9dbd591e6506d999402", size = 256100, upload-time = "2025-08-11T12:07:04.564Z" },
- { url = "https://files.pythonhosted.org/packages/09/54/655be13ae324212bf0bc15d665a4e34844f34c206f78801be42f7a0a8aaa/multidict-6.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bbc14f0365534d35a06970d6a83478b249752e922d662dc24d489af1aa0d1be7", size = 253584, upload-time = "2025-08-11T12:07:05.914Z" },
- { url = "https://files.pythonhosted.org/packages/5c/74/ab2039ecc05264b5cec73eb018ce417af3ebb384ae9c0e9ed42cb33f8151/multidict-6.6.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:75aa52fba2d96bf972e85451b99d8e19cc37ce26fd016f6d4aa60da9ab2b005f", size = 251018, upload-time = "2025-08-11T12:07:08.301Z" },
- { url = "https://files.pythonhosted.org/packages/af/0a/ccbb244ac848e56c6427f2392741c06302bbfba49c0042f1eb3c5b606497/multidict-6.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fefd4a815e362d4f011919d97d7b4a1e566f1dde83dc4ad8cfb5b41de1df68d", size = 251477, upload-time = "2025-08-11T12:07:10.248Z" },
- { url = "https://files.pythonhosted.org/packages/0e/b0/0ed49bba775b135937f52fe13922bc64a7eaf0a3ead84a36e8e4e446e096/multidict-6.6.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:db9801fe021f59a5b375ab778973127ca0ac52429a26e2fd86aa9508f4d26eb7", size = 263575, upload-time = "2025-08-11T12:07:11.928Z" },
- { url = "https://files.pythonhosted.org/packages/3e/d9/7fb85a85e14de2e44dfb6a24f03c41e2af8697a6df83daddb0e9b7569f73/multidict-6.6.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a650629970fa21ac1fb06ba25dabfc5b8a2054fcbf6ae97c758aa956b8dba802", size = 259649, upload-time = "2025-08-11T12:07:13.244Z" },
- { url = "https://files.pythonhosted.org/packages/03/9e/b3a459bcf9b6e74fa461a5222a10ff9b544cb1cd52fd482fb1b75ecda2a2/multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:452ff5da78d4720d7516a3a2abd804957532dd69296cb77319c193e3ffb87e24", size = 251505, upload-time = "2025-08-11T12:07:14.57Z" },
- { url = "https://files.pythonhosted.org/packages/86/a2/8022f78f041dfe6d71e364001a5cf987c30edfc83c8a5fb7a3f0974cff39/multidict-6.6.4-cp312-cp312-win32.whl", hash = "sha256:8c2fcb12136530ed19572bbba61b407f655e3953ba669b96a35036a11a485793", size = 41888, upload-time = "2025-08-11T12:07:15.904Z" },
- { url = "https://files.pythonhosted.org/packages/c7/eb/d88b1780d43a56db2cba24289fa744a9d216c1a8546a0dc3956563fd53ea/multidict-6.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:047d9425860a8c9544fed1b9584f0c8bcd31bcde9568b047c5e567a1025ecd6e", size = 46072, upload-time = "2025-08-11T12:07:17.045Z" },
- { url = "https://files.pythonhosted.org/packages/9f/16/b929320bf5750e2d9d4931835a4c638a19d2494a5b519caaaa7492ebe105/multidict-6.6.4-cp312-cp312-win_arm64.whl", hash = "sha256:14754eb72feaa1e8ae528468f24250dd997b8e2188c3d2f593f9eba259e4b364", size = 43222, upload-time = "2025-08-11T12:07:18.328Z" },
- { url = "https://files.pythonhosted.org/packages/3a/5d/e1db626f64f60008320aab00fbe4f23fc3300d75892a3381275b3d284580/multidict-6.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f46a6e8597f9bd71b31cc708195d42b634c8527fecbcf93febf1052cacc1f16e", size = 75848, upload-time = "2025-08-11T12:07:19.912Z" },
- { url = "https://files.pythonhosted.org/packages/4c/aa/8b6f548d839b6c13887253af4e29c939af22a18591bfb5d0ee6f1931dae8/multidict-6.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:22e38b2bc176c5eb9c0a0e379f9d188ae4cd8b28c0f53b52bce7ab0a9e534657", size = 45060, upload-time = "2025-08-11T12:07:21.163Z" },
- { url = "https://files.pythonhosted.org/packages/eb/c6/f5e97e5d99a729bc2aa58eb3ebfa9f1e56a9b517cc38c60537c81834a73f/multidict-6.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5df8afd26f162da59e218ac0eefaa01b01b2e6cd606cffa46608f699539246da", size = 43269, upload-time = "2025-08-11T12:07:22.392Z" },
- { url = "https://files.pythonhosted.org/packages/dc/31/d54eb0c62516776f36fe67f84a732f97e0b0e12f98d5685bebcc6d396910/multidict-6.6.4-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:49517449b58d043023720aa58e62b2f74ce9b28f740a0b5d33971149553d72aa", size = 237158, upload-time = "2025-08-11T12:07:23.636Z" },
- { url = "https://files.pythonhosted.org/packages/c4/1c/8a10c1c25b23156e63b12165a929d8eb49a6ed769fdbefb06e6f07c1e50d/multidict-6.6.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9408439537c5afdca05edd128a63f56a62680f4b3c234301055d7a2000220f", size = 257076, upload-time = "2025-08-11T12:07:25.049Z" },
- { url = "https://files.pythonhosted.org/packages/ad/86/90e20b5771d6805a119e483fd3d1e8393e745a11511aebca41f0da38c3e2/multidict-6.6.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:87a32d20759dc52a9e850fe1061b6e41ab28e2998d44168a8a341b99ded1dba0", size = 240694, upload-time = "2025-08-11T12:07:26.458Z" },
- { url = "https://files.pythonhosted.org/packages/e7/49/484d3e6b535bc0555b52a0a26ba86e4d8d03fd5587d4936dc59ba7583221/multidict-6.6.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:52e3c8d43cdfff587ceedce9deb25e6ae77daba560b626e97a56ddcad3756879", size = 266350, upload-time = "2025-08-11T12:07:27.94Z" },
- { url = "https://files.pythonhosted.org/packages/bf/b4/aa4c5c379b11895083d50021e229e90c408d7d875471cb3abf721e4670d6/multidict-6.6.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ad8850921d3a8d8ff6fbef790e773cecfc260bbfa0566998980d3fa8f520bc4a", size = 267250, upload-time = "2025-08-11T12:07:29.303Z" },
- { url = "https://files.pythonhosted.org/packages/80/e5/5e22c5bf96a64bdd43518b1834c6d95a4922cc2066b7d8e467dae9b6cee6/multidict-6.6.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:497a2954adc25c08daff36f795077f63ad33e13f19bfff7736e72c785391534f", size = 254900, upload-time = "2025-08-11T12:07:30.764Z" },
- { url = "https://files.pythonhosted.org/packages/17/38/58b27fed927c07035abc02befacab42491e7388ca105e087e6e0215ead64/multidict-6.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:024ce601f92d780ca1617ad4be5ac15b501cc2414970ffa2bb2bbc2bd5a68fa5", size = 252355, upload-time = "2025-08-11T12:07:32.205Z" },
- { url = "https://files.pythonhosted.org/packages/d0/a1/dad75d23a90c29c02b5d6f3d7c10ab36c3197613be5d07ec49c7791e186c/multidict-6.6.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a693fc5ed9bdd1c9e898013e0da4dcc640de7963a371c0bd458e50e046bf6438", size = 250061, upload-time = "2025-08-11T12:07:33.623Z" },
- { url = "https://files.pythonhosted.org/packages/b8/1a/ac2216b61c7f116edab6dc3378cca6c70dc019c9a457ff0d754067c58b20/multidict-6.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:190766dac95aab54cae5b152a56520fd99298f32a1266d66d27fdd1b5ac00f4e", size = 249675, upload-time = "2025-08-11T12:07:34.958Z" },
- { url = "https://files.pythonhosted.org/packages/d4/79/1916af833b800d13883e452e8e0977c065c4ee3ab7a26941fbfdebc11895/multidict-6.6.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8f2a5ffdceab9dcd97c7a016deb2308531d5f0fced2bb0c9e1df45b3363d7", size = 261247, upload-time = "2025-08-11T12:07:36.588Z" },
- { url = "https://files.pythonhosted.org/packages/c5/65/d1f84fe08ac44a5fc7391cbc20a7cedc433ea616b266284413fd86062f8c/multidict-6.6.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:59e8d40ab1f5a8597abcef00d04845155a5693b5da00d2c93dbe88f2050f2812", size = 257960, upload-time = "2025-08-11T12:07:39.735Z" },
- { url = "https://files.pythonhosted.org/packages/13/b5/29ec78057d377b195ac2c5248c773703a6b602e132a763e20ec0457e7440/multidict-6.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:467fe64138cfac771f0e949b938c2e1ada2b5af22f39692aa9258715e9ea613a", size = 250078, upload-time = "2025-08-11T12:07:41.525Z" },
- { url = "https://files.pythonhosted.org/packages/c4/0e/7e79d38f70a872cae32e29b0d77024bef7834b0afb406ddae6558d9e2414/multidict-6.6.4-cp313-cp313-win32.whl", hash = "sha256:14616a30fe6d0a48d0a48d1a633ab3b8bec4cf293aac65f32ed116f620adfd69", size = 41708, upload-time = "2025-08-11T12:07:43.405Z" },
- { url = "https://files.pythonhosted.org/packages/9d/34/746696dffff742e97cd6a23da953e55d0ea51fa601fa2ff387b3edcfaa2c/multidict-6.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:40cd05eaeb39e2bc8939451f033e57feaa2ac99e07dbca8afe2be450a4a3b6cf", size = 45912, upload-time = "2025-08-11T12:07:45.082Z" },
- { url = "https://files.pythonhosted.org/packages/c7/87/3bac136181e271e29170d8d71929cdeddeb77f3e8b6a0c08da3a8e9da114/multidict-6.6.4-cp313-cp313-win_arm64.whl", hash = "sha256:f6eb37d511bfae9e13e82cb4d1af36b91150466f24d9b2b8a9785816deb16605", size = 43076, upload-time = "2025-08-11T12:07:46.746Z" },
- { url = "https://files.pythonhosted.org/packages/64/94/0a8e63e36c049b571c9ae41ee301ada29c3fee9643d9c2548d7d558a1d99/multidict-6.6.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6c84378acd4f37d1b507dfa0d459b449e2321b3ba5f2338f9b085cf7a7ba95eb", size = 82812, upload-time = "2025-08-11T12:07:48.402Z" },
- { url = "https://files.pythonhosted.org/packages/25/1a/be8e369dfcd260d2070a67e65dd3990dd635cbd735b98da31e00ea84cd4e/multidict-6.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0e0558693063c75f3d952abf645c78f3c5dfdd825a41d8c4d8156fc0b0da6e7e", size = 48313, upload-time = "2025-08-11T12:07:49.679Z" },
- { url = "https://files.pythonhosted.org/packages/26/5a/dd4ade298674b2f9a7b06a32c94ffbc0497354df8285f27317c66433ce3b/multidict-6.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3f8e2384cb83ebd23fd07e9eada8ba64afc4c759cd94817433ab8c81ee4b403f", size = 46777, upload-time = "2025-08-11T12:07:51.318Z" },
- { url = "https://files.pythonhosted.org/packages/89/db/98aa28bc7e071bfba611ac2ae803c24e96dd3a452b4118c587d3d872c64c/multidict-6.6.4-cp313-cp313t-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:f996b87b420995a9174b2a7c1a8daf7db4750be6848b03eb5e639674f7963773", size = 229321, upload-time = "2025-08-11T12:07:52.965Z" },
- { url = "https://files.pythonhosted.org/packages/c7/bc/01ddda2a73dd9d167bd85d0e8ef4293836a8f82b786c63fb1a429bc3e678/multidict-6.6.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc356250cffd6e78416cf5b40dc6a74f1edf3be8e834cf8862d9ed5265cf9b0e", size = 249954, upload-time = "2025-08-11T12:07:54.423Z" },
- { url = "https://files.pythonhosted.org/packages/06/78/6b7c0f020f9aa0acf66d0ab4eb9f08375bac9a50ff5e3edb1c4ccd59eafc/multidict-6.6.4-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:dadf95aa862714ea468a49ad1e09fe00fcc9ec67d122f6596a8d40caf6cec7d0", size = 228612, upload-time = "2025-08-11T12:07:55.914Z" },
- { url = "https://files.pythonhosted.org/packages/00/44/3faa416f89b2d5d76e9d447296a81521e1c832ad6e40b92f990697b43192/multidict-6.6.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7dd57515bebffd8ebd714d101d4c434063322e4fe24042e90ced41f18b6d3395", size = 257528, upload-time = "2025-08-11T12:07:57.371Z" },
- { url = "https://files.pythonhosted.org/packages/05/5f/77c03b89af0fcb16f018f668207768191fb9dcfb5e3361a5e706a11db2c9/multidict-6.6.4-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:967af5f238ebc2eb1da4e77af5492219fbd9b4b812347da39a7b5f5c72c0fa45", size = 256329, upload-time = "2025-08-11T12:07:58.844Z" },
- { url = "https://files.pythonhosted.org/packages/cf/e9/ed750a2a9afb4f8dc6f13dc5b67b514832101b95714f1211cd42e0aafc26/multidict-6.6.4-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a4c6875c37aae9794308ec43e3530e4aa0d36579ce38d89979bbf89582002bb", size = 247928, upload-time = "2025-08-11T12:08:01.037Z" },
- { url = "https://files.pythonhosted.org/packages/1f/b5/e0571bc13cda277db7e6e8a532791d4403dacc9850006cb66d2556e649c0/multidict-6.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f683a551e92bdb7fac545b9c6f9fa2aebdeefa61d607510b3533286fcab67f5", size = 245228, upload-time = "2025-08-11T12:08:02.96Z" },
- { url = "https://files.pythonhosted.org/packages/f3/a3/69a84b0eccb9824491f06368f5b86e72e4af54c3067c37c39099b6687109/multidict-6.6.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:3ba5aaf600edaf2a868a391779f7a85d93bed147854925f34edd24cc70a3e141", size = 235869, upload-time = "2025-08-11T12:08:04.746Z" },
- { url = "https://files.pythonhosted.org/packages/a9/9d/28802e8f9121a6a0804fa009debf4e753d0a59969ea9f70be5f5fdfcb18f/multidict-6.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:580b643b7fd2c295d83cad90d78419081f53fd532d1f1eb67ceb7060f61cff0d", size = 243446, upload-time = "2025-08-11T12:08:06.332Z" },
- { url = "https://files.pythonhosted.org/packages/38/ea/6c98add069b4878c1d66428a5f5149ddb6d32b1f9836a826ac764b9940be/multidict-6.6.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:37b7187197da6af3ee0b044dbc9625afd0c885f2800815b228a0e70f9a7f473d", size = 252299, upload-time = "2025-08-11T12:08:07.931Z" },
- { url = "https://files.pythonhosted.org/packages/3a/09/8fe02d204473e14c0af3affd50af9078839dfca1742f025cca765435d6b4/multidict-6.6.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e1b93790ed0bc26feb72e2f08299691ceb6da5e9e14a0d13cc74f1869af327a0", size = 246926, upload-time = "2025-08-11T12:08:09.467Z" },
- { url = "https://files.pythonhosted.org/packages/37/3d/7b1e10d774a6df5175ecd3c92bff069e77bed9ec2a927fdd4ff5fe182f67/multidict-6.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a506a77ddee1efcca81ecbeae27ade3e09cdf21a8ae854d766c2bb4f14053f92", size = 243383, upload-time = "2025-08-11T12:08:10.981Z" },
- { url = "https://files.pythonhosted.org/packages/50/b0/a6fae46071b645ae98786ab738447de1ef53742eaad949f27e960864bb49/multidict-6.6.4-cp313-cp313t-win32.whl", hash = "sha256:f93b2b2279883d1d0a9e1bd01f312d6fc315c5e4c1f09e112e4736e2f650bc4e", size = 47775, upload-time = "2025-08-11T12:08:12.439Z" },
- { url = "https://files.pythonhosted.org/packages/b2/0a/2436550b1520091af0600dff547913cb2d66fbac27a8c33bc1b1bccd8d98/multidict-6.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:6d46a180acdf6e87cc41dc15d8f5c2986e1e8739dc25dbb7dac826731ef381a4", size = 53100, upload-time = "2025-08-11T12:08:13.823Z" },
- { url = "https://files.pythonhosted.org/packages/97/ea/43ac51faff934086db9c072a94d327d71b7d8b40cd5dcb47311330929ef0/multidict-6.6.4-cp313-cp313t-win_arm64.whl", hash = "sha256:756989334015e3335d087a27331659820d53ba432befdef6a718398b0a8493ad", size = 45501, upload-time = "2025-08-11T12:08:15.173Z" },
- { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" },
-]
-
-[[package]]
-name = "mypy"
-version = "1.17.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "mypy-extensions" },
- { name = "pathspec" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/1e/e3/034322d5a779685218ed69286c32faa505247f1f096251ef66c8fd203b08/mypy-1.17.0.tar.gz", hash = "sha256:e5d7ccc08ba089c06e2f5629c660388ef1fee708444f1dee0b9203fa031dee03", size = 3352114, upload-time = "2025-07-14T20:34:30.181Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/12/e9/e6824ed620bbf51d3bf4d6cbbe4953e83eaf31a448d1b3cfb3620ccb641c/mypy-1.17.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f986f1cab8dbec39ba6e0eaa42d4d3ac6686516a5d3dccd64be095db05ebc6bb", size = 11086395, upload-time = "2025-07-14T20:34:11.452Z" },
- { url = "https://files.pythonhosted.org/packages/ba/51/a4afd1ae279707953be175d303f04a5a7bd7e28dc62463ad29c1c857927e/mypy-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:51e455a54d199dd6e931cd7ea987d061c2afbaf0960f7f66deef47c90d1b304d", size = 10120052, upload-time = "2025-07-14T20:33:09.897Z" },
- { url = "https://files.pythonhosted.org/packages/8a/71/19adfeac926ba8205f1d1466d0d360d07b46486bf64360c54cb5a2bd86a8/mypy-1.17.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3204d773bab5ff4ebbd1f8efa11b498027cd57017c003ae970f310e5b96be8d8", size = 11861806, upload-time = "2025-07-14T20:32:16.028Z" },
- { url = "https://files.pythonhosted.org/packages/0b/64/d6120eca3835baf7179e6797a0b61d6c47e0bc2324b1f6819d8428d5b9ba/mypy-1.17.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1051df7ec0886fa246a530ae917c473491e9a0ba6938cfd0ec2abc1076495c3e", size = 12744371, upload-time = "2025-07-14T20:33:33.503Z" },
- { url = "https://files.pythonhosted.org/packages/1f/dc/56f53b5255a166f5bd0f137eed960e5065f2744509dfe69474ff0ba772a5/mypy-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f773c6d14dcc108a5b141b4456b0871df638eb411a89cd1c0c001fc4a9d08fc8", size = 12914558, upload-time = "2025-07-14T20:33:56.961Z" },
- { url = "https://files.pythonhosted.org/packages/69/ac/070bad311171badc9add2910e7f89271695a25c136de24bbafc7eded56d5/mypy-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:1619a485fd0e9c959b943c7b519ed26b712de3002d7de43154a489a2d0fd817d", size = 9585447, upload-time = "2025-07-14T20:32:20.594Z" },
- { url = "https://files.pythonhosted.org/packages/be/7b/5f8ab461369b9e62157072156935cec9d272196556bdc7c2ff5f4c7c0f9b/mypy-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c41aa59211e49d717d92b3bb1238c06d387c9325d3122085113c79118bebb06", size = 11070019, upload-time = "2025-07-14T20:32:07.99Z" },
- { url = "https://files.pythonhosted.org/packages/9c/f8/c49c9e5a2ac0badcc54beb24e774d2499748302c9568f7f09e8730e953fa/mypy-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e69db1fb65b3114f98c753e3930a00514f5b68794ba80590eb02090d54a5d4a", size = 10114457, upload-time = "2025-07-14T20:33:47.285Z" },
- { url = "https://files.pythonhosted.org/packages/89/0c/fb3f9c939ad9beed3e328008b3fb90b20fda2cddc0f7e4c20dbefefc3b33/mypy-1.17.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:03ba330b76710f83d6ac500053f7727270b6b8553b0423348ffb3af6f2f7b889", size = 11857838, upload-time = "2025-07-14T20:33:14.462Z" },
- { url = "https://files.pythonhosted.org/packages/4c/66/85607ab5137d65e4f54d9797b77d5a038ef34f714929cf8ad30b03f628df/mypy-1.17.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:037bc0f0b124ce46bfde955c647f3e395c6174476a968c0f22c95a8d2f589bba", size = 12731358, upload-time = "2025-07-14T20:32:25.579Z" },
- { url = "https://files.pythonhosted.org/packages/73/d0/341dbbfb35ce53d01f8f2969facbb66486cee9804048bf6c01b048127501/mypy-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c38876106cb6132259683632b287238858bd58de267d80defb6f418e9ee50658", size = 12917480, upload-time = "2025-07-14T20:34:21.868Z" },
- { url = "https://files.pythonhosted.org/packages/64/63/70c8b7dbfc520089ac48d01367a97e8acd734f65bd07813081f508a8c94c/mypy-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:d30ba01c0f151998f367506fab31c2ac4527e6a7b2690107c7a7f9e3cb419a9c", size = 9589666, upload-time = "2025-07-14T20:34:16.841Z" },
- { url = "https://files.pythonhosted.org/packages/e3/fc/ee058cc4316f219078464555873e99d170bde1d9569abd833300dbeb484a/mypy-1.17.0-py3-none-any.whl", hash = "sha256:15d9d0018237ab058e5de3d8fce61b6fa72cc59cc78fd91f1b474bce12abf496", size = 2283195, upload-time = "2025-07-14T20:31:54.753Z" },
-]
-
-[[package]]
-name = "mypy-extensions"
-version = "1.1.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
-]
-
-[[package]]
-name = "netaddr"
-version = "1.3.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/54/90/188b2a69654f27b221fba92fda7217778208532c962509e959a9cee5229d/netaddr-1.3.0.tar.gz", hash = "sha256:5c3c3d9895b551b763779ba7db7a03487dc1f8e3b385af819af341ae9ef6e48a", size = 2260504, upload-time = "2024-05-28T21:30:37.743Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/12/cc/f4fe2c7ce68b92cbf5b2d379ca366e1edae38cccaad00f69f529b460c3ef/netaddr-1.3.0-py3-none-any.whl", hash = "sha256:c2c6a8ebe5554ce33b7d5b3a306b71bbb373e000bbbf2350dd5213cc56e3dbbe", size = 2262023, upload-time = "2024-05-28T21:30:34.191Z" },
-]
-
-[[package]]
-name = "nltk"
-version = "3.9.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "joblib" },
- { name = "regex" },
- { name = "tqdm" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/3c/87/db8be88ad32c2d042420b6fd9ffd4a149f9a0d7f0e86b3f543be2eeeedd2/nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868", size = 2904691, upload-time = "2024-08-18T19:48:37.769Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4d/66/7d9e26593edda06e8cb531874633f7c2372279c3b0f46235539fe546df8b/nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1", size = 1505442, upload-time = "2024-08-18T19:48:21.909Z" },
-]
-
-[[package]]
-name = "numerize"
-version = "0.12"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/6a/cf/c5dfa6ca5b6553f30860337020d76c582fd81b48da58982a6f2ff1f1fe40/numerize-0.12.tar.gz", hash = "sha256:5548fe72adceb2c7964998179697d80117bb117f57cd02f872cf5db40d615c04", size = 2721, upload-time = "2018-08-14T14:48:33.212Z" }
-
-[[package]]
-name = "numpy"
-version = "2.3.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/d0/19/95b3d357407220ed24c139018d2518fab0a61a948e68286a25f1a4d049ff/numpy-2.3.3.tar.gz", hash = "sha256:ddc7c39727ba62b80dfdbedf400d1c10ddfa8eefbd7ec8dcb118be8b56d31029", size = 20576648, upload-time = "2025-09-09T16:54:12.543Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/51/5d/bb7fc075b762c96329147799e1bcc9176ab07ca6375ea976c475482ad5b3/numpy-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cfdd09f9c84a1a934cde1eec2267f0a43a7cd44b2cca4ff95b7c0d14d144b0bf", size = 20957014, upload-time = "2025-09-09T15:56:29.966Z" },
- { url = "https://files.pythonhosted.org/packages/6b/0e/c6211bb92af26517acd52125a237a92afe9c3124c6a68d3b9f81b62a0568/numpy-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb32e3cf0f762aee47ad1ddc6672988f7f27045b0783c887190545baba73aa25", size = 14185220, upload-time = "2025-09-09T15:56:32.175Z" },
- { url = "https://files.pythonhosted.org/packages/22/f2/07bb754eb2ede9073f4054f7c0286b0d9d2e23982e090a80d478b26d35ca/numpy-2.3.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:396b254daeb0a57b1fe0ecb5e3cff6fa79a380fa97c8f7781a6d08cd429418fe", size = 5113918, upload-time = "2025-09-09T15:56:34.175Z" },
- { url = "https://files.pythonhosted.org/packages/81/0a/afa51697e9fb74642f231ea36aca80fa17c8fb89f7a82abd5174023c3960/numpy-2.3.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:067e3d7159a5d8f8a0b46ee11148fc35ca9b21f61e3c49fbd0a027450e65a33b", size = 6647922, upload-time = "2025-09-09T15:56:36.149Z" },
- { url = "https://files.pythonhosted.org/packages/5d/f5/122d9cdb3f51c520d150fef6e87df9279e33d19a9611a87c0d2cf78a89f4/numpy-2.3.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c02d0629d25d426585fb2e45a66154081b9fa677bc92a881ff1d216bc9919a8", size = 14281991, upload-time = "2025-09-09T15:56:40.548Z" },
- { url = "https://files.pythonhosted.org/packages/51/64/7de3c91e821a2debf77c92962ea3fe6ac2bc45d0778c1cbe15d4fce2fd94/numpy-2.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9192da52b9745f7f0766531dcfa978b7763916f158bb63bdb8a1eca0068ab20", size = 16641643, upload-time = "2025-09-09T15:56:43.343Z" },
- { url = "https://files.pythonhosted.org/packages/30/e4/961a5fa681502cd0d68907818b69f67542695b74e3ceaa513918103b7e80/numpy-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cd7de500a5b66319db419dc3c345244404a164beae0d0937283b907d8152e6ea", size = 16056787, upload-time = "2025-09-09T15:56:46.141Z" },
- { url = "https://files.pythonhosted.org/packages/99/26/92c912b966e47fbbdf2ad556cb17e3a3088e2e1292b9833be1dfa5361a1a/numpy-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:93d4962d8f82af58f0b2eb85daaf1b3ca23fe0a85d0be8f1f2b7bb46034e56d7", size = 18579598, upload-time = "2025-09-09T15:56:49.844Z" },
- { url = "https://files.pythonhosted.org/packages/17/b6/fc8f82cb3520768718834f310c37d96380d9dc61bfdaf05fe5c0b7653e01/numpy-2.3.3-cp312-cp312-win32.whl", hash = "sha256:5534ed6b92f9b7dca6c0a19d6df12d41c68b991cef051d108f6dbff3babc4ebf", size = 6320800, upload-time = "2025-09-09T15:56:52.499Z" },
- { url = "https://files.pythonhosted.org/packages/32/ee/de999f2625b80d043d6d2d628c07d0d5555a677a3cf78fdf868d409b8766/numpy-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:497d7cad08e7092dba36e3d296fe4c97708c93daf26643a1ae4b03f6294d30eb", size = 12786615, upload-time = "2025-09-09T15:56:54.422Z" },
- { url = "https://files.pythonhosted.org/packages/49/6e/b479032f8a43559c383acb20816644f5f91c88f633d9271ee84f3b3a996c/numpy-2.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:ca0309a18d4dfea6fc6262a66d06c26cfe4640c3926ceec90e57791a82b6eee5", size = 10195936, upload-time = "2025-09-09T15:56:56.541Z" },
- { url = "https://files.pythonhosted.org/packages/7d/b9/984c2b1ee61a8b803bf63582b4ac4242cf76e2dbd663efeafcb620cc0ccb/numpy-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f5415fb78995644253370985342cd03572ef8620b934da27d77377a2285955bf", size = 20949588, upload-time = "2025-09-09T15:56:59.087Z" },
- { url = "https://files.pythonhosted.org/packages/a6/e4/07970e3bed0b1384d22af1e9912527ecbeb47d3b26e9b6a3bced068b3bea/numpy-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d00de139a3324e26ed5b95870ce63be7ec7352171bc69a4cf1f157a48e3eb6b7", size = 14177802, upload-time = "2025-09-09T15:57:01.73Z" },
- { url = "https://files.pythonhosted.org/packages/35/c7/477a83887f9de61f1203bad89cf208b7c19cc9fef0cebef65d5a1a0619f2/numpy-2.3.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:9dc13c6a5829610cc07422bc74d3ac083bd8323f14e2827d992f9e52e22cd6a6", size = 5106537, upload-time = "2025-09-09T15:57:03.765Z" },
- { url = "https://files.pythonhosted.org/packages/52/47/93b953bd5866a6f6986344d045a207d3f1cfbad99db29f534ea9cee5108c/numpy-2.3.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:d79715d95f1894771eb4e60fb23f065663b2298f7d22945d66877aadf33d00c7", size = 6640743, upload-time = "2025-09-09T15:57:07.921Z" },
- { url = "https://files.pythonhosted.org/packages/23/83/377f84aaeb800b64c0ef4de58b08769e782edcefa4fea712910b6f0afd3c/numpy-2.3.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:952cfd0748514ea7c3afc729a0fc639e61655ce4c55ab9acfab14bda4f402b4c", size = 14278881, upload-time = "2025-09-09T15:57:11.349Z" },
- { url = "https://files.pythonhosted.org/packages/9a/a5/bf3db6e66c4b160d6ea10b534c381a1955dfab34cb1017ea93aa33c70ed3/numpy-2.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b83648633d46f77039c29078751f80da65aa64d5622a3cd62aaef9d835b6c93", size = 16636301, upload-time = "2025-09-09T15:57:14.245Z" },
- { url = "https://files.pythonhosted.org/packages/a2/59/1287924242eb4fa3f9b3a2c30400f2e17eb2707020d1c5e3086fe7330717/numpy-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b001bae8cea1c7dfdb2ae2b017ed0a6f2102d7a70059df1e338e307a4c78a8ae", size = 16053645, upload-time = "2025-09-09T15:57:16.534Z" },
- { url = "https://files.pythonhosted.org/packages/e6/93/b3d47ed882027c35e94ac2320c37e452a549f582a5e801f2d34b56973c97/numpy-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e9aced64054739037d42fb84c54dd38b81ee238816c948c8f3ed134665dcd86", size = 18578179, upload-time = "2025-09-09T15:57:18.883Z" },
- { url = "https://files.pythonhosted.org/packages/20/d9/487a2bccbf7cc9d4bfc5f0f197761a5ef27ba870f1e3bbb9afc4bbe3fcc2/numpy-2.3.3-cp313-cp313-win32.whl", hash = "sha256:9591e1221db3f37751e6442850429b3aabf7026d3b05542d102944ca7f00c8a8", size = 6312250, upload-time = "2025-09-09T15:57:21.296Z" },
- { url = "https://files.pythonhosted.org/packages/1b/b5/263ebbbbcede85028f30047eab3d58028d7ebe389d6493fc95ae66c636ab/numpy-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f0dadeb302887f07431910f67a14d57209ed91130be0adea2f9793f1a4f817cf", size = 12783269, upload-time = "2025-09-09T15:57:23.034Z" },
- { url = "https://files.pythonhosted.org/packages/fa/75/67b8ca554bbeaaeb3fac2e8bce46967a5a06544c9108ec0cf5cece559b6c/numpy-2.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:3c7cf302ac6e0b76a64c4aecf1a09e51abd9b01fc7feee80f6c43e3ab1b1dbc5", size = 10195314, upload-time = "2025-09-09T15:57:25.045Z" },
- { url = "https://files.pythonhosted.org/packages/11/d0/0d1ddec56b162042ddfafeeb293bac672de9b0cfd688383590090963720a/numpy-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:eda59e44957d272846bb407aad19f89dc6f58fecf3504bd144f4c5cf81a7eacc", size = 21048025, upload-time = "2025-09-09T15:57:27.257Z" },
- { url = "https://files.pythonhosted.org/packages/36/9e/1996ca6b6d00415b6acbdd3c42f7f03ea256e2c3f158f80bd7436a8a19f3/numpy-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:823d04112bc85ef5c4fda73ba24e6096c8f869931405a80aa8b0e604510a26bc", size = 14301053, upload-time = "2025-09-09T15:57:30.077Z" },
- { url = "https://files.pythonhosted.org/packages/05/24/43da09aa764c68694b76e84b3d3f0c44cb7c18cdc1ba80e48b0ac1d2cd39/numpy-2.3.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:40051003e03db4041aa325da2a0971ba41cf65714e65d296397cc0e32de6018b", size = 5229444, upload-time = "2025-09-09T15:57:32.733Z" },
- { url = "https://files.pythonhosted.org/packages/bc/14/50ffb0f22f7218ef8af28dd089f79f68289a7a05a208db9a2c5dcbe123c1/numpy-2.3.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6ee9086235dd6ab7ae75aba5662f582a81ced49f0f1c6de4260a78d8f2d91a19", size = 6738039, upload-time = "2025-09-09T15:57:34.328Z" },
- { url = "https://files.pythonhosted.org/packages/55/52/af46ac0795e09657d45a7f4db961917314377edecf66db0e39fa7ab5c3d3/numpy-2.3.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94fcaa68757c3e2e668ddadeaa86ab05499a70725811e582b6a9858dd472fb30", size = 14352314, upload-time = "2025-09-09T15:57:36.255Z" },
- { url = "https://files.pythonhosted.org/packages/a7/b1/dc226b4c90eb9f07a3fff95c2f0db3268e2e54e5cce97c4ac91518aee71b/numpy-2.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da1a74b90e7483d6ce5244053399a614b1d6b7bc30a60d2f570e5071f8959d3e", size = 16701722, upload-time = "2025-09-09T15:57:38.622Z" },
- { url = "https://files.pythonhosted.org/packages/9d/9d/9d8d358f2eb5eced14dba99f110d83b5cd9a4460895230f3b396ad19a323/numpy-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2990adf06d1ecee3b3dcbb4977dfab6e9f09807598d647f04d385d29e7a3c3d3", size = 16132755, upload-time = "2025-09-09T15:57:41.16Z" },
- { url = "https://files.pythonhosted.org/packages/b6/27/b3922660c45513f9377b3fb42240bec63f203c71416093476ec9aa0719dc/numpy-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ed635ff692483b8e3f0fcaa8e7eb8a75ee71aa6d975388224f70821421800cea", size = 18651560, upload-time = "2025-09-09T15:57:43.459Z" },
- { url = "https://files.pythonhosted.org/packages/5b/8e/3ab61a730bdbbc201bb245a71102aa609f0008b9ed15255500a99cd7f780/numpy-2.3.3-cp313-cp313t-win32.whl", hash = "sha256:a333b4ed33d8dc2b373cc955ca57babc00cd6f9009991d9edc5ddbc1bac36bcd", size = 6442776, upload-time = "2025-09-09T15:57:45.793Z" },
- { url = "https://files.pythonhosted.org/packages/1c/3a/e22b766b11f6030dc2decdeff5c2fb1610768055603f9f3be88b6d192fb2/numpy-2.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4384a169c4d8f97195980815d6fcad04933a7e1ab3b530921c3fef7a1c63426d", size = 12927281, upload-time = "2025-09-09T15:57:47.492Z" },
- { url = "https://files.pythonhosted.org/packages/7b/42/c2e2bc48c5e9b2a83423f99733950fbefd86f165b468a3d85d52b30bf782/numpy-2.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:75370986cc0bc66f4ce5110ad35aae6d182cc4ce6433c40ad151f53690130bf1", size = 10265275, upload-time = "2025-09-09T15:57:49.647Z" },
- { url = "https://files.pythonhosted.org/packages/6b/01/342ad585ad82419b99bcf7cebe99e61da6bedb89e213c5fd71acc467faee/numpy-2.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cd052f1fa6a78dee696b58a914b7229ecfa41f0a6d96dc663c1220a55e137593", size = 20951527, upload-time = "2025-09-09T15:57:52.006Z" },
- { url = "https://files.pythonhosted.org/packages/ef/d8/204e0d73fc1b7a9ee80ab1fe1983dd33a4d64a4e30a05364b0208e9a241a/numpy-2.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:414a97499480067d305fcac9716c29cf4d0d76db6ebf0bf3cbce666677f12652", size = 14186159, upload-time = "2025-09-09T15:57:54.407Z" },
- { url = "https://files.pythonhosted.org/packages/22/af/f11c916d08f3a18fb8ba81ab72b5b74a6e42ead4c2846d270eb19845bf74/numpy-2.3.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:50a5fe69f135f88a2be9b6ca0481a68a136f6febe1916e4920e12f1a34e708a7", size = 5114624, upload-time = "2025-09-09T15:57:56.5Z" },
- { url = "https://files.pythonhosted.org/packages/fb/11/0ed919c8381ac9d2ffacd63fd1f0c34d27e99cab650f0eb6f110e6ae4858/numpy-2.3.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:b912f2ed2b67a129e6a601e9d93d4fa37bef67e54cac442a2f588a54afe5c67a", size = 6642627, upload-time = "2025-09-09T15:57:58.206Z" },
- { url = "https://files.pythonhosted.org/packages/ee/83/deb5f77cb0f7ba6cb52b91ed388b47f8f3c2e9930d4665c600408d9b90b9/numpy-2.3.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9e318ee0596d76d4cb3d78535dc005fa60e5ea348cd131a51e99d0bdbe0b54fe", size = 14296926, upload-time = "2025-09-09T15:58:00.035Z" },
- { url = "https://files.pythonhosted.org/packages/77/cc/70e59dcb84f2b005d4f306310ff0a892518cc0c8000a33d0e6faf7ca8d80/numpy-2.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce020080e4a52426202bdb6f7691c65bb55e49f261f31a8f506c9f6bc7450421", size = 16638958, upload-time = "2025-09-09T15:58:02.738Z" },
- { url = "https://files.pythonhosted.org/packages/b6/5a/b2ab6c18b4257e099587d5b7f903317bd7115333ad8d4ec4874278eafa61/numpy-2.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:e6687dc183aa55dae4a705b35f9c0f8cb178bcaa2f029b241ac5356221d5c021", size = 16071920, upload-time = "2025-09-09T15:58:05.029Z" },
- { url = "https://files.pythonhosted.org/packages/b8/f1/8b3fdc44324a259298520dd82147ff648979bed085feeacc1250ef1656c0/numpy-2.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d8f3b1080782469fdc1718c4ed1d22549b5fb12af0d57d35e992158a772a37cf", size = 18577076, upload-time = "2025-09-09T15:58:07.745Z" },
- { url = "https://files.pythonhosted.org/packages/f0/a1/b87a284fb15a42e9274e7fcea0dad259d12ddbf07c1595b26883151ca3b4/numpy-2.3.3-cp314-cp314-win32.whl", hash = "sha256:cb248499b0bc3be66ebd6578b83e5acacf1d6cb2a77f2248ce0e40fbec5a76d0", size = 6366952, upload-time = "2025-09-09T15:58:10.096Z" },
- { url = "https://files.pythonhosted.org/packages/70/5f/1816f4d08f3b8f66576d8433a66f8fa35a5acfb3bbd0bf6c31183b003f3d/numpy-2.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:691808c2b26b0f002a032c73255d0bd89751425f379f7bcd22d140db593a96e8", size = 12919322, upload-time = "2025-09-09T15:58:12.138Z" },
- { url = "https://files.pythonhosted.org/packages/8c/de/072420342e46a8ea41c324a555fa90fcc11637583fb8df722936aed1736d/numpy-2.3.3-cp314-cp314-win_arm64.whl", hash = "sha256:9ad12e976ca7b10f1774b03615a2a4bab8addce37ecc77394d8e986927dc0dfe", size = 10478630, upload-time = "2025-09-09T15:58:14.64Z" },
- { url = "https://files.pythonhosted.org/packages/d5/df/ee2f1c0a9de7347f14da5dd3cd3c3b034d1b8607ccb6883d7dd5c035d631/numpy-2.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9cc48e09feb11e1db00b320e9d30a4151f7369afb96bd0e48d942d09da3a0d00", size = 21047987, upload-time = "2025-09-09T15:58:16.889Z" },
- { url = "https://files.pythonhosted.org/packages/d6/92/9453bdc5a4e9e69cf4358463f25e8260e2ffc126d52e10038b9077815989/numpy-2.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:901bf6123879b7f251d3631967fd574690734236075082078e0571977c6a8e6a", size = 14301076, upload-time = "2025-09-09T15:58:20.343Z" },
- { url = "https://files.pythonhosted.org/packages/13/77/1447b9eb500f028bb44253105bd67534af60499588a5149a94f18f2ca917/numpy-2.3.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:7f025652034199c301049296b59fa7d52c7e625017cae4c75d8662e377bf487d", size = 5229491, upload-time = "2025-09-09T15:58:22.481Z" },
- { url = "https://files.pythonhosted.org/packages/3d/f9/d72221b6ca205f9736cb4b2ce3b002f6e45cd67cd6a6d1c8af11a2f0b649/numpy-2.3.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:533ca5f6d325c80b6007d4d7fb1984c303553534191024ec6a524a4c92a5935a", size = 6737913, upload-time = "2025-09-09T15:58:24.569Z" },
- { url = "https://files.pythonhosted.org/packages/3c/5f/d12834711962ad9c46af72f79bb31e73e416ee49d17f4c797f72c96b6ca5/numpy-2.3.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0edd58682a399824633b66885d699d7de982800053acf20be1eaa46d92009c54", size = 14352811, upload-time = "2025-09-09T15:58:26.416Z" },
- { url = "https://files.pythonhosted.org/packages/a1/0d/fdbec6629d97fd1bebed56cd742884e4eead593611bbe1abc3eb40d304b2/numpy-2.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:367ad5d8fbec5d9296d18478804a530f1191e24ab4d75ab408346ae88045d25e", size = 16702689, upload-time = "2025-09-09T15:58:28.831Z" },
- { url = "https://files.pythonhosted.org/packages/9b/09/0a35196dc5575adde1eb97ddfbc3e1687a814f905377621d18ca9bc2b7dd/numpy-2.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8f6ac61a217437946a1fa48d24c47c91a0c4f725237871117dea264982128097", size = 16133855, upload-time = "2025-09-09T15:58:31.349Z" },
- { url = "https://files.pythonhosted.org/packages/7a/ca/c9de3ea397d576f1b6753eaa906d4cdef1bf97589a6d9825a349b4729cc2/numpy-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:179a42101b845a816d464b6fe9a845dfaf308fdfc7925387195570789bb2c970", size = 18652520, upload-time = "2025-09-09T15:58:33.762Z" },
- { url = "https://files.pythonhosted.org/packages/fd/c2/e5ed830e08cd0196351db55db82f65bc0ab05da6ef2b72a836dcf1936d2f/numpy-2.3.3-cp314-cp314t-win32.whl", hash = "sha256:1250c5d3d2562ec4174bce2e3a1523041595f9b651065e4a4473f5f48a6bc8a5", size = 6515371, upload-time = "2025-09-09T15:58:36.04Z" },
- { url = "https://files.pythonhosted.org/packages/47/c7/b0f6b5b67f6788a0725f744496badbb604d226bf233ba716683ebb47b570/numpy-2.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:b37a0b2e5935409daebe82c1e42274d30d9dd355852529eab91dab8dcca7419f", size = 13112576, upload-time = "2025-09-09T15:58:37.927Z" },
- { url = "https://files.pythonhosted.org/packages/06/b9/33bba5ff6fb679aa0b1f8a07e853f002a6b04b9394db3069a1270a7784ca/numpy-2.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:78c9f6560dc7e6b3990e32df7ea1a50bbd0e2a111e05209963f5ddcab7073b0b", size = 10545953, upload-time = "2025-09-09T15:58:40.576Z" },
-]
-
-[[package]]
-name = "oauthlib"
-version = "3.3.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/0b/5f/19930f824ffeb0ad4372da4812c50edbd1434f678c90c2733e1188edfc63/oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9", size = 185918, upload-time = "2025-06-19T22:48:08.269Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/be/9c/92789c596b8df838baa98fa71844d84283302f7604ed565dafe5a6b5041a/oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1", size = 160065, upload-time = "2025-06-19T22:48:06.508Z" },
-]
-
-[[package]]
-name = "openai"
-version = "1.107.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anyio" },
- { name = "distro" },
- { name = "httpx" },
- { name = "jiter" },
- { name = "pydantic" },
- { name = "sniffio" },
- { name = "tqdm" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/88/67/d6498de300f83ff57a79cb7aa96ef3bef8d6f070c3ded0f1b5b45442a6bc/openai-1.107.0.tar.gz", hash = "sha256:43e04927584e57d0e9e640ee0077c78baf8150098be96ebd5c512539b6c4e9a4", size = 566056, upload-time = "2025-09-08T19:25:47.604Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/91/ed/e8a4fd20390f2858b95227c288df8fe0c835f7c77625f7583609161684ba/openai-1.107.0-py3-none-any.whl", hash = "sha256:3dcfa3cbb116bd6924b27913b8da28c4a787379ff60049588547a1013e6d6438", size = 950968, upload-time = "2025-09-08T19:25:45.552Z" },
-]
-
-[[package]]
-name = "orjson"
-version = "3.11.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/be/4d/8df5f83256a809c22c4d6792ce8d43bb503be0fb7a8e4da9025754b09658/orjson-3.11.3.tar.gz", hash = "sha256:1c0603b1d2ffcd43a411d64797a19556ef76958aef1c182f22dc30860152a98a", size = 5482394, upload-time = "2025-08-26T17:46:43.171Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3d/b0/a7edab2a00cdcb2688e1c943401cb3236323e7bfd2839815c6131a3742f4/orjson-3.11.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8c752089db84333e36d754c4baf19c0e1437012242048439c7e80eb0e6426e3b", size = 238259, upload-time = "2025-08-26T17:45:15.093Z" },
- { url = "https://files.pythonhosted.org/packages/e1/c6/ff4865a9cc398a07a83342713b5932e4dc3cb4bf4bc04e8f83dedfc0d736/orjson-3.11.3-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:9b8761b6cf04a856eb544acdd82fc594b978f12ac3602d6374a7edb9d86fd2c2", size = 127633, upload-time = "2025-08-26T17:45:16.417Z" },
- { url = "https://files.pythonhosted.org/packages/6e/e6/e00bea2d9472f44fe8794f523e548ce0ad51eb9693cf538a753a27b8bda4/orjson-3.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b13974dc8ac6ba22feaa867fc19135a3e01a134b4f7c9c28162fed4d615008a", size = 123061, upload-time = "2025-08-26T17:45:17.673Z" },
- { url = "https://files.pythonhosted.org/packages/54/31/9fbb78b8e1eb3ac605467cb846e1c08d0588506028b37f4ee21f978a51d4/orjson-3.11.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f83abab5bacb76d9c821fd5c07728ff224ed0e52d7a71b7b3de822f3df04e15c", size = 127956, upload-time = "2025-08-26T17:45:19.172Z" },
- { url = "https://files.pythonhosted.org/packages/36/88/b0604c22af1eed9f98d709a96302006915cfd724a7ebd27d6dd11c22d80b/orjson-3.11.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6fbaf48a744b94091a56c62897b27c31ee2da93d826aa5b207131a1e13d4064", size = 130790, upload-time = "2025-08-26T17:45:20.586Z" },
- { url = "https://files.pythonhosted.org/packages/0e/9d/1c1238ae9fffbfed51ba1e507731b3faaf6b846126a47e9649222b0fd06f/orjson-3.11.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc779b4f4bba2847d0d2940081a7b6f7b5877e05408ffbb74fa1faf4a136c424", size = 132385, upload-time = "2025-08-26T17:45:22.036Z" },
- { url = "https://files.pythonhosted.org/packages/a3/b5/c06f1b090a1c875f337e21dd71943bc9d84087f7cdf8c6e9086902c34e42/orjson-3.11.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd4b909ce4c50faa2192da6bb684d9848d4510b736b0611b6ab4020ea6fd2d23", size = 135305, upload-time = "2025-08-26T17:45:23.4Z" },
- { url = "https://files.pythonhosted.org/packages/a0/26/5f028c7d81ad2ebbf84414ba6d6c9cac03f22f5cd0d01eb40fb2d6a06b07/orjson-3.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:524b765ad888dc5518bbce12c77c2e83dee1ed6b0992c1790cc5fb49bb4b6667", size = 132875, upload-time = "2025-08-26T17:45:25.182Z" },
- { url = "https://files.pythonhosted.org/packages/fe/d4/b8df70d9cfb56e385bf39b4e915298f9ae6c61454c8154a0f5fd7efcd42e/orjson-3.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:84fd82870b97ae3cdcea9d8746e592b6d40e1e4d4527835fc520c588d2ded04f", size = 130940, upload-time = "2025-08-26T17:45:27.209Z" },
- { url = "https://files.pythonhosted.org/packages/da/5e/afe6a052ebc1a4741c792dd96e9f65bf3939d2094e8b356503b68d48f9f5/orjson-3.11.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fbecb9709111be913ae6879b07bafd4b0785b44c1eb5cac8ac76da048b3885a1", size = 403852, upload-time = "2025-08-26T17:45:28.478Z" },
- { url = "https://files.pythonhosted.org/packages/f8/90/7bbabafeb2ce65915e9247f14a56b29c9334003536009ef5b122783fe67e/orjson-3.11.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9dba358d55aee552bd868de348f4736ca5a4086d9a62e2bfbbeeb5629fe8b0cc", size = 146293, upload-time = "2025-08-26T17:45:29.86Z" },
- { url = "https://files.pythonhosted.org/packages/27/b3/2d703946447da8b093350570644a663df69448c9d9330e5f1d9cce997f20/orjson-3.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eabcf2e84f1d7105f84580e03012270c7e97ecb1fb1618bda395061b2a84a049", size = 135470, upload-time = "2025-08-26T17:45:31.243Z" },
- { url = "https://files.pythonhosted.org/packages/38/70/b14dcfae7aff0e379b0119c8a812f8396678919c431efccc8e8a0263e4d9/orjson-3.11.3-cp312-cp312-win32.whl", hash = "sha256:3782d2c60b8116772aea8d9b7905221437fdf53e7277282e8d8b07c220f96cca", size = 136248, upload-time = "2025-08-26T17:45:32.567Z" },
- { url = "https://files.pythonhosted.org/packages/35/b8/9e3127d65de7fff243f7f3e53f59a531bf6bb295ebe5db024c2503cc0726/orjson-3.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:79b44319268af2eaa3e315b92298de9a0067ade6e6003ddaef72f8e0bedb94f1", size = 131437, upload-time = "2025-08-26T17:45:34.949Z" },
- { url = "https://files.pythonhosted.org/packages/51/92/a946e737d4d8a7fd84a606aba96220043dcc7d6988b9e7551f7f6d5ba5ad/orjson-3.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:0e92a4e83341ef79d835ca21b8bd13e27c859e4e9e4d7b63defc6e58462a3710", size = 125978, upload-time = "2025-08-26T17:45:36.422Z" },
- { url = "https://files.pythonhosted.org/packages/fc/79/8932b27293ad35919571f77cb3693b5906cf14f206ef17546052a241fdf6/orjson-3.11.3-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:af40c6612fd2a4b00de648aa26d18186cd1322330bd3a3cc52f87c699e995810", size = 238127, upload-time = "2025-08-26T17:45:38.146Z" },
- { url = "https://files.pythonhosted.org/packages/1c/82/cb93cd8cf132cd7643b30b6c5a56a26c4e780c7a145db6f83de977b540ce/orjson-3.11.3-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:9f1587f26c235894c09e8b5b7636a38091a9e6e7fe4531937534749c04face43", size = 127494, upload-time = "2025-08-26T17:45:39.57Z" },
- { url = "https://files.pythonhosted.org/packages/a4/b8/2d9eb181a9b6bb71463a78882bcac1027fd29cf62c38a40cc02fc11d3495/orjson-3.11.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61dcdad16da5bb486d7227a37a2e789c429397793a6955227cedbd7252eb5a27", size = 123017, upload-time = "2025-08-26T17:45:40.876Z" },
- { url = "https://files.pythonhosted.org/packages/b4/14/a0e971e72d03b509190232356d54c0f34507a05050bd026b8db2bf2c192c/orjson-3.11.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11c6d71478e2cbea0a709e8a06365fa63da81da6498a53e4c4f065881d21ae8f", size = 127898, upload-time = "2025-08-26T17:45:42.188Z" },
- { url = "https://files.pythonhosted.org/packages/8e/af/dc74536722b03d65e17042cc30ae586161093e5b1f29bccda24765a6ae47/orjson-3.11.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff94112e0098470b665cb0ed06efb187154b63649403b8d5e9aedeb482b4548c", size = 130742, upload-time = "2025-08-26T17:45:43.511Z" },
- { url = "https://files.pythonhosted.org/packages/62/e6/7a3b63b6677bce089fe939353cda24a7679825c43a24e49f757805fc0d8a/orjson-3.11.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae8b756575aaa2a855a75192f356bbda11a89169830e1439cfb1a3e1a6dde7be", size = 132377, upload-time = "2025-08-26T17:45:45.525Z" },
- { url = "https://files.pythonhosted.org/packages/fc/cd/ce2ab93e2e7eaf518f0fd15e3068b8c43216c8a44ed82ac2b79ce5cef72d/orjson-3.11.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9416cc19a349c167ef76135b2fe40d03cea93680428efee8771f3e9fb66079d", size = 135313, upload-time = "2025-08-26T17:45:46.821Z" },
- { url = "https://files.pythonhosted.org/packages/d0/b4/f98355eff0bd1a38454209bbc73372ce351ba29933cb3e2eba16c04b9448/orjson-3.11.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b822caf5b9752bc6f246eb08124c3d12bf2175b66ab74bac2ef3bbf9221ce1b2", size = 132908, upload-time = "2025-08-26T17:45:48.126Z" },
- { url = "https://files.pythonhosted.org/packages/eb/92/8f5182d7bc2a1bed46ed960b61a39af8389f0ad476120cd99e67182bfb6d/orjson-3.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:414f71e3bdd5573893bf5ecdf35c32b213ed20aa15536fe2f588f946c318824f", size = 130905, upload-time = "2025-08-26T17:45:49.414Z" },
- { url = "https://files.pythonhosted.org/packages/1a/60/c41ca753ce9ffe3d0f67b9b4c093bdd6e5fdb1bc53064f992f66bb99954d/orjson-3.11.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:828e3149ad8815dc14468f36ab2a4b819237c155ee1370341b91ea4c8672d2ee", size = 403812, upload-time = "2025-08-26T17:45:51.085Z" },
- { url = "https://files.pythonhosted.org/packages/dd/13/e4a4f16d71ce1868860db59092e78782c67082a8f1dc06a3788aef2b41bc/orjson-3.11.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac9e05f25627ffc714c21f8dfe3a579445a5c392a9c8ae7ba1d0e9fb5333f56e", size = 146277, upload-time = "2025-08-26T17:45:52.851Z" },
- { url = "https://files.pythonhosted.org/packages/8d/8b/bafb7f0afef9344754a3a0597a12442f1b85a048b82108ef2c956f53babd/orjson-3.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e44fbe4000bd321d9f3b648ae46e0196d21577cf66ae684a96ff90b1f7c93633", size = 135418, upload-time = "2025-08-26T17:45:54.806Z" },
- { url = "https://files.pythonhosted.org/packages/60/d4/bae8e4f26afb2c23bea69d2f6d566132584d1c3a5fe89ee8c17b718cab67/orjson-3.11.3-cp313-cp313-win32.whl", hash = "sha256:2039b7847ba3eec1f5886e75e6763a16e18c68a63efc4b029ddf994821e2e66b", size = 136216, upload-time = "2025-08-26T17:45:57.182Z" },
- { url = "https://files.pythonhosted.org/packages/88/76/224985d9f127e121c8cad882cea55f0ebe39f97925de040b75ccd4b33999/orjson-3.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:29be5ac4164aa8bdcba5fa0700a3c9c316b411d8ed9d39ef8a882541bd452fae", size = 131362, upload-time = "2025-08-26T17:45:58.56Z" },
- { url = "https://files.pythonhosted.org/packages/e2/cf/0dce7a0be94bd36d1346be5067ed65ded6adb795fdbe3abd234c8d576d01/orjson-3.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:18bd1435cb1f2857ceb59cfb7de6f92593ef7b831ccd1b9bfb28ca530e539dce", size = 125989, upload-time = "2025-08-26T17:45:59.95Z" },
- { url = "https://files.pythonhosted.org/packages/ef/77/d3b1fef1fc6aaeed4cbf3be2b480114035f4df8fa1a99d2dac1d40d6e924/orjson-3.11.3-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cf4b81227ec86935568c7edd78352a92e97af8da7bd70bdfdaa0d2e0011a1ab4", size = 238115, upload-time = "2025-08-26T17:46:01.669Z" },
- { url = "https://files.pythonhosted.org/packages/e4/6d/468d21d49bb12f900052edcfbf52c292022d0a323d7828dc6376e6319703/orjson-3.11.3-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:bc8bc85b81b6ac9fc4dae393a8c159b817f4c2c9dee5d12b773bddb3b95fc07e", size = 127493, upload-time = "2025-08-26T17:46:03.466Z" },
- { url = "https://files.pythonhosted.org/packages/67/46/1e2588700d354aacdf9e12cc2d98131fb8ac6f31ca65997bef3863edb8ff/orjson-3.11.3-cp314-cp314-manylinux_2_34_aarch64.whl", hash = "sha256:88dcfc514cfd1b0de038443c7b3e6a9797ffb1b3674ef1fd14f701a13397f82d", size = 122998, upload-time = "2025-08-26T17:46:04.803Z" },
- { url = "https://files.pythonhosted.org/packages/3b/94/11137c9b6adb3779f1b34fd98be51608a14b430dbc02c6d41134fbba484c/orjson-3.11.3-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:d61cd543d69715d5fc0a690c7c6f8dcc307bc23abef9738957981885f5f38229", size = 132915, upload-time = "2025-08-26T17:46:06.237Z" },
- { url = "https://files.pythonhosted.org/packages/10/61/dccedcf9e9bcaac09fdabe9eaee0311ca92115699500efbd31950d878833/orjson-3.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2b7b153ed90ababadbef5c3eb39549f9476890d339cf47af563aea7e07db2451", size = 130907, upload-time = "2025-08-26T17:46:07.581Z" },
- { url = "https://files.pythonhosted.org/packages/0e/fd/0e935539aa7b08b3ca0f817d73034f7eb506792aae5ecc3b7c6e679cdf5f/orjson-3.11.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7909ae2460f5f494fecbcd10613beafe40381fd0316e35d6acb5f3a05bfda167", size = 403852, upload-time = "2025-08-26T17:46:08.982Z" },
- { url = "https://files.pythonhosted.org/packages/4a/2b/50ae1a5505cd1043379132fdb2adb8a05f37b3e1ebffe94a5073321966fd/orjson-3.11.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:2030c01cbf77bc67bee7eef1e7e31ecf28649353987775e3583062c752da0077", size = 146309, upload-time = "2025-08-26T17:46:10.576Z" },
- { url = "https://files.pythonhosted.org/packages/cd/1d/a473c158e380ef6f32753b5f39a69028b25ec5be331c2049a2201bde2e19/orjson-3.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a0169ebd1cbd94b26c7a7ad282cf5c2744fce054133f959e02eb5265deae1872", size = 135424, upload-time = "2025-08-26T17:46:12.386Z" },
- { url = "https://files.pythonhosted.org/packages/da/09/17d9d2b60592890ff7382e591aa1d9afb202a266b180c3d4049b1ec70e4a/orjson-3.11.3-cp314-cp314-win32.whl", hash = "sha256:0c6d7328c200c349e3a4c6d8c83e0a5ad029bdc2d417f234152bf34842d0fc8d", size = 136266, upload-time = "2025-08-26T17:46:13.853Z" },
- { url = "https://files.pythonhosted.org/packages/15/58/358f6846410a6b4958b74734727e582ed971e13d335d6c7ce3e47730493e/orjson-3.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:317bbe2c069bbc757b1a2e4105b64aacd3bc78279b66a6b9e51e846e4809f804", size = 131351, upload-time = "2025-08-26T17:46:15.27Z" },
- { url = "https://files.pythonhosted.org/packages/28/01/d6b274a0635be0468d4dbd9cafe80c47105937a0d42434e805e67cd2ed8b/orjson-3.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:e8f6a7a27d7b7bec81bd5924163e9af03d49bbb63013f107b48eb5d16db711bc", size = 125985, upload-time = "2025-08-26T17:46:16.67Z" },
-]
-
-[[package]]
-name = "ormsgpack"
-version = "1.10.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/92/36/44eed5ef8ce93cded76a576780bab16425ce7876f10d3e2e6265e46c21ea/ormsgpack-1.10.0.tar.gz", hash = "sha256:7f7a27efd67ef22d7182ec3b7fa7e9d147c3ad9be2a24656b23c989077e08b16", size = 58629, upload-time = "2025-05-24T19:07:53.944Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/99/95/f3ab1a7638f6aa9362e87916bb96087fbbc5909db57e19f12ad127560e1e/ormsgpack-1.10.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:4e159d50cd4064d7540e2bc6a0ab66eab70b0cc40c618b485324ee17037527c0", size = 376806, upload-time = "2025-05-24T19:07:17.221Z" },
- { url = "https://files.pythonhosted.org/packages/6c/2b/42f559f13c0b0f647b09d749682851d47c1a7e48308c43612ae6833499c8/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb47c85f3a866e29279d801115b554af0fefc409e2ed8aa90aabfa77efe5cc6", size = 204433, upload-time = "2025-05-24T19:07:18.569Z" },
- { url = "https://files.pythonhosted.org/packages/45/42/1ca0cb4d8c80340a89a4af9e6d8951fb8ba0d076a899d2084eadf536f677/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c28249574934534c9bd5dce5485c52f21bcea0ee44d13ece3def6e3d2c3798b5", size = 215547, upload-time = "2025-05-24T19:07:20.245Z" },
- { url = "https://files.pythonhosted.org/packages/0a/38/184a570d7c44c0260bc576d1daaac35b2bfd465a50a08189518505748b9a/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1957dcadbb16e6a981cd3f9caef9faf4c2df1125e2a1b702ee8236a55837ce07", size = 216746, upload-time = "2025-05-24T19:07:21.83Z" },
- { url = "https://files.pythonhosted.org/packages/69/2f/1aaffd08f6b7fdc2a57336a80bdfb8df24e6a65ada5aa769afecfcbc6cc6/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3b29412558c740bf6bac156727aa85ac67f9952cd6f071318f29ee72e1a76044", size = 384783, upload-time = "2025-05-24T19:07:23.674Z" },
- { url = "https://files.pythonhosted.org/packages/a9/63/3e53d6f43bb35e00c98f2b8ab2006d5138089ad254bc405614fbf0213502/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6933f350c2041ec189fe739f0ba7d6117c8772f5bc81f45b97697a84d03020dd", size = 479076, upload-time = "2025-05-24T19:07:25.047Z" },
- { url = "https://files.pythonhosted.org/packages/b8/19/fa1121b03b61402bb4d04e35d164e2320ef73dfb001b57748110319dd014/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9a86de06d368fcc2e58b79dece527dc8ca831e0e8b9cec5d6e633d2777ec93d0", size = 390447, upload-time = "2025-05-24T19:07:26.568Z" },
- { url = "https://files.pythonhosted.org/packages/b0/0d/73143ecb94ac4a5dcba223402139240a75dee0cc6ba8a543788a5646407a/ormsgpack-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:35fa9f81e5b9a0dab42e09a73f7339ecffdb978d6dbf9deb2ecf1e9fc7808722", size = 121401, upload-time = "2025-05-24T19:07:28.308Z" },
- { url = "https://files.pythonhosted.org/packages/61/f8/ec5f4e03268d0097545efaab2893aa63f171cf2959cb0ea678a5690e16a1/ormsgpack-1.10.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:8d816d45175a878993b7372bd5408e0f3ec5a40f48e2d5b9d8f1cc5d31b61f1f", size = 376806, upload-time = "2025-05-24T19:07:29.555Z" },
- { url = "https://files.pythonhosted.org/packages/c1/19/b3c53284aad1e90d4d7ed8c881a373d218e16675b8b38e3569d5b40cc9b8/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a90345ccb058de0f35262893751c603b6376b05f02be2b6f6b7e05d9dd6d5643", size = 204433, upload-time = "2025-05-24T19:07:30.977Z" },
- { url = "https://files.pythonhosted.org/packages/09/0b/845c258f59df974a20a536c06cace593698491defdd3d026a8a5f9b6e745/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144b5e88f1999433e54db9d637bae6fe21e935888be4e3ac3daecd8260bd454e", size = 215549, upload-time = "2025-05-24T19:07:32.345Z" },
- { url = "https://files.pythonhosted.org/packages/61/56/57fce8fb34ca6c9543c026ebebf08344c64dbb7b6643d6ddd5355d37e724/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2190b352509d012915921cca76267db136cd026ddee42f1b0d9624613cc7058c", size = 216747, upload-time = "2025-05-24T19:07:34.075Z" },
- { url = "https://files.pythonhosted.org/packages/b8/3f/655b5f6a2475c8d209f5348cfbaaf73ce26237b92d79ef2ad439407dd0fa/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:86fd9c1737eaba43d3bb2730add9c9e8b5fbed85282433705dd1b1e88ea7e6fb", size = 384785, upload-time = "2025-05-24T19:07:35.83Z" },
- { url = "https://files.pythonhosted.org/packages/4b/94/687a0ad8afd17e4bce1892145d6a1111e58987ddb176810d02a1f3f18686/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:33afe143a7b61ad21bb60109a86bb4e87fec70ef35db76b89c65b17e32da7935", size = 479076, upload-time = "2025-05-24T19:07:37.533Z" },
- { url = "https://files.pythonhosted.org/packages/c8/34/68925232e81e0e062a2f0ac678f62aa3b6f7009d6a759e19324dbbaebae7/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f23d45080846a7b90feabec0d330a9cc1863dc956728412e4f7986c80ab3a668", size = 390446, upload-time = "2025-05-24T19:07:39.469Z" },
- { url = "https://files.pythonhosted.org/packages/12/ad/f4e1a36a6d1714afb7ffb74b3ababdcb96529cf4e7a216f9f7c8eda837b6/ormsgpack-1.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:534d18acb805c75e5fba09598bf40abe1851c853247e61dda0c01f772234da69", size = 121399, upload-time = "2025-05-24T19:07:40.854Z" },
-]
-
-[[package]]
-name = "packaging"
-version = "25.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
-]
-
-[[package]]
-name = "pandas"
-version = "2.3.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "numpy" },
- { name = "python-dateutil" },
- { name = "pytz" },
- { name = "tzdata" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/79/8e/0e90233ac205ad182bd6b422532695d2b9414944a280488105d598c70023/pandas-2.3.2.tar.gz", hash = "sha256:ab7b58f8f82706890924ccdfb5f48002b83d2b5a3845976a9fb705d36c34dcdb", size = 4488684, upload-time = "2025-08-21T10:28:29.257Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ec/db/614c20fb7a85a14828edd23f1c02db58a30abf3ce76f38806155d160313c/pandas-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fbb977f802156e7a3f829e9d1d5398f6192375a3e2d1a9ee0803e35fe70a2b9", size = 11587652, upload-time = "2025-08-21T10:27:15.888Z" },
- { url = "https://files.pythonhosted.org/packages/99/b0/756e52f6582cade5e746f19bad0517ff27ba9c73404607c0306585c201b3/pandas-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b9b52693123dd234b7c985c68b709b0b009f4521000d0525f2b95c22f15944b", size = 10717686, upload-time = "2025-08-21T10:27:18.486Z" },
- { url = "https://files.pythonhosted.org/packages/37/4c/dd5ccc1e357abfeee8353123282de17997f90ff67855f86154e5a13b81e5/pandas-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bd281310d4f412733f319a5bc552f86d62cddc5f51d2e392c8787335c994175", size = 11278722, upload-time = "2025-08-21T10:27:21.149Z" },
- { url = "https://files.pythonhosted.org/packages/d3/a4/f7edcfa47e0a88cda0be8b068a5bae710bf264f867edfdf7b71584ace362/pandas-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d31a6b4354e3b9b8a2c848af75d31da390657e3ac6f30c05c82068b9ed79b9", size = 11987803, upload-time = "2025-08-21T10:27:23.767Z" },
- { url = "https://files.pythonhosted.org/packages/f6/61/1bce4129f93ab66f1c68b7ed1c12bac6a70b1b56c5dab359c6bbcd480b52/pandas-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:df4df0b9d02bb873a106971bb85d448378ef14b86ba96f035f50bbd3688456b4", size = 12766345, upload-time = "2025-08-21T10:27:26.6Z" },
- { url = "https://files.pythonhosted.org/packages/8e/46/80d53de70fee835531da3a1dae827a1e76e77a43ad22a8cd0f8142b61587/pandas-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:213a5adf93d020b74327cb2c1b842884dbdd37f895f42dcc2f09d451d949f811", size = 13439314, upload-time = "2025-08-21T10:27:29.213Z" },
- { url = "https://files.pythonhosted.org/packages/28/30/8114832daff7489f179971dbc1d854109b7f4365a546e3ea75b6516cea95/pandas-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c13b81a9347eb8c7548f53fd9a4f08d4dfe996836543f805c987bafa03317ae", size = 10983326, upload-time = "2025-08-21T10:27:31.901Z" },
- { url = "https://files.pythonhosted.org/packages/27/64/a2f7bf678af502e16b472527735d168b22b7824e45a4d7e96a4fbb634b59/pandas-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0c6ecbac99a354a051ef21c5307601093cb9e0f4b1855984a084bfec9302699e", size = 11531061, upload-time = "2025-08-21T10:27:34.647Z" },
- { url = "https://files.pythonhosted.org/packages/54/4c/c3d21b2b7769ef2f4c2b9299fcadd601efa6729f1357a8dbce8dd949ed70/pandas-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6f048aa0fd080d6a06cc7e7537c09b53be6642d330ac6f54a600c3ace857ee9", size = 10668666, upload-time = "2025-08-21T10:27:37.203Z" },
- { url = "https://files.pythonhosted.org/packages/50/e2/f775ba76ecfb3424d7f5862620841cf0edb592e9abd2d2a5387d305fe7a8/pandas-2.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0064187b80a5be6f2f9c9d6bdde29372468751dfa89f4211a3c5871854cfbf7a", size = 11332835, upload-time = "2025-08-21T10:27:40.188Z" },
- { url = "https://files.pythonhosted.org/packages/8f/52/0634adaace9be2d8cac9ef78f05c47f3a675882e068438b9d7ec7ef0c13f/pandas-2.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac8c320bded4718b298281339c1a50fb00a6ba78cb2a63521c39bec95b0209b", size = 12057211, upload-time = "2025-08-21T10:27:43.117Z" },
- { url = "https://files.pythonhosted.org/packages/0b/9d/2df913f14b2deb9c748975fdb2491da1a78773debb25abbc7cbc67c6b549/pandas-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:114c2fe4f4328cf98ce5716d1532f3ab79c5919f95a9cfee81d9140064a2e4d6", size = 12749277, upload-time = "2025-08-21T10:27:45.474Z" },
- { url = "https://files.pythonhosted.org/packages/87/af/da1a2417026bd14d98c236dba88e39837182459d29dcfcea510b2ac9e8a1/pandas-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:48fa91c4dfb3b2b9bfdb5c24cd3567575f4e13f9636810462ffed8925352be5a", size = 13415256, upload-time = "2025-08-21T10:27:49.885Z" },
- { url = "https://files.pythonhosted.org/packages/22/3c/f2af1ce8840ef648584a6156489636b5692c162771918aa95707c165ad2b/pandas-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:12d039facec710f7ba305786837d0225a3444af7bbd9c15c32ca2d40d157ed8b", size = 10982579, upload-time = "2025-08-21T10:28:08.435Z" },
- { url = "https://files.pythonhosted.org/packages/f3/98/8df69c4097a6719e357dc249bf437b8efbde808038268e584421696cbddf/pandas-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c624b615ce97864eb588779ed4046186f967374185c047070545253a52ab2d57", size = 12028163, upload-time = "2025-08-21T10:27:52.232Z" },
- { url = "https://files.pythonhosted.org/packages/0e/23/f95cbcbea319f349e10ff90db488b905c6883f03cbabd34f6b03cbc3c044/pandas-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0cee69d583b9b128823d9514171cabb6861e09409af805b54459bd0c821a35c2", size = 11391860, upload-time = "2025-08-21T10:27:54.673Z" },
- { url = "https://files.pythonhosted.org/packages/ad/1b/6a984e98c4abee22058aa75bfb8eb90dce58cf8d7296f8bc56c14bc330b0/pandas-2.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2319656ed81124982900b4c37f0e0c58c015af9a7bbc62342ba5ad07ace82ba9", size = 11309830, upload-time = "2025-08-21T10:27:56.957Z" },
- { url = "https://files.pythonhosted.org/packages/15/d5/f0486090eb18dd8710bf60afeaf638ba6817047c0c8ae5c6a25598665609/pandas-2.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b37205ad6f00d52f16b6d09f406434ba928c1a1966e2771006a9033c736d30d2", size = 11883216, upload-time = "2025-08-21T10:27:59.302Z" },
- { url = "https://files.pythonhosted.org/packages/10/86/692050c119696da19e20245bbd650d8dfca6ceb577da027c3a73c62a047e/pandas-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:837248b4fc3a9b83b9c6214699a13f069dc13510a6a6d7f9ba33145d2841a012", size = 12699743, upload-time = "2025-08-21T10:28:02.447Z" },
- { url = "https://files.pythonhosted.org/packages/cd/d7/612123674d7b17cf345aad0a10289b2a384bff404e0463a83c4a3a59d205/pandas-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d2c3554bd31b731cd6490d94a28f3abb8dd770634a9e06eb6d2911b9827db370", size = 13186141, upload-time = "2025-08-21T10:28:05.377Z" },
-]
-
-[[package]]
-name = "parsimonious"
-version = "0.10.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "regex" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/7b/91/abdc50c4ef06fdf8d047f60ee777ca9b2a7885e1a9cea81343fbecda52d7/parsimonious-0.10.0.tar.gz", hash = "sha256:8281600da180ec8ae35427a4ab4f7b82bfec1e3d1e52f80cb60ea82b9512501c", size = 52172, upload-time = "2022-09-03T17:01:17.004Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/aa/0f/c8b64d9b54ea631fcad4e9e3c8dbe8c11bb32a623be94f22974c88e71eaf/parsimonious-0.10.0-py3-none-any.whl", hash = "sha256:982ab435fabe86519b57f6b35610aa4e4e977e9f02a14353edf4bbc75369fc0f", size = 48427, upload-time = "2022-09-03T17:01:13.814Z" },
-]
-
-[[package]]
-name = "pathspec"
-version = "0.12.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
-]
-
-[[package]]
-name = "pinata-python"
-version = "1.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pytest" },
- { name = "requests" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ae/e0/1826d50f359d55cb6c80e3fdb4e433edd80e912288b42a9d7ec22f249318/pinata-python-1.0.0.tar.gz", hash = "sha256:5fb4f2c4495f6422751a413cb5a3443403a6642c4fe98578259bbf292211d277", size = 10645, upload-time = "2022-04-25T16:35:50.538Z" }
-
-[[package]]
-name = "pluggy"
-version = "1.6.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
-]
-
-[[package]]
-name = "primp"
-version = "0.15.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/56/0b/a87556189da4de1fc6360ca1aa05e8335509633f836cdd06dd17f0743300/primp-0.15.0.tar.gz", hash = "sha256:1af8ea4b15f57571ff7fc5e282a82c5eb69bc695e19b8ddeeda324397965b30a", size = 113022, upload-time = "2025-04-17T11:41:05.315Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f5/5a/146ac964b99ea7657ad67eb66f770be6577dfe9200cb28f9a95baffd6c3f/primp-0.15.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1b281f4ca41a0c6612d4c6e68b96e28acfe786d226a427cd944baa8d7acd644f", size = 3178914, upload-time = "2025-04-17T11:40:59.558Z" },
- { url = "https://files.pythonhosted.org/packages/bc/8a/cc2321e32db3ce64d6e32950d5bcbea01861db97bfb20b5394affc45b387/primp-0.15.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:489cbab55cd793ceb8f90bb7423c6ea64ebb53208ffcf7a044138e3c66d77299", size = 2955079, upload-time = "2025-04-17T11:40:57.398Z" },
- { url = "https://files.pythonhosted.org/packages/c3/7b/cbd5d999a07ff2a21465975d4eb477ae6f69765e8fe8c9087dab250180d8/primp-0.15.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c18b45c23f94016215f62d2334552224236217aaeb716871ce0e4dcfa08eb161", size = 3281018, upload-time = "2025-04-17T11:40:55.308Z" },
- { url = "https://files.pythonhosted.org/packages/1b/6e/a6221c612e61303aec2bcac3f0a02e8b67aee8c0db7bdc174aeb8010f975/primp-0.15.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e985a9cba2e3f96a323722e5440aa9eccaac3178e74b884778e926b5249df080", size = 3255229, upload-time = "2025-04-17T11:40:47.811Z" },
- { url = "https://files.pythonhosted.org/packages/3b/54/bfeef5aca613dc660a69d0760a26c6b8747d8fdb5a7f20cb2cee53c9862f/primp-0.15.0-cp38-abi3-manylinux_2_34_armv7l.whl", hash = "sha256:6b84a6ffa083e34668ff0037221d399c24d939b5629cd38223af860de9e17a83", size = 3014522, upload-time = "2025-04-17T11:40:50.191Z" },
- { url = "https://files.pythonhosted.org/packages/ac/96/84078e09f16a1dad208f2fe0f8a81be2cf36e024675b0f9eec0c2f6e2182/primp-0.15.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:592f6079646bdf5abbbfc3b0a28dac8de943f8907a250ce09398cda5eaebd260", size = 3418567, upload-time = "2025-04-17T11:41:01.595Z" },
- { url = "https://files.pythonhosted.org/packages/6c/80/8a7a9587d3eb85be3d0b64319f2f690c90eb7953e3f73a9ddd9e46c8dc42/primp-0.15.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5a728e5a05f37db6189eb413d22c78bd143fa59dd6a8a26dacd43332b3971fe8", size = 3606279, upload-time = "2025-04-17T11:41:03.61Z" },
- { url = "https://files.pythonhosted.org/packages/0c/dd/f0183ed0145e58cf9d286c1b2c14f63ccee987a4ff79ac85acc31b5d86bd/primp-0.15.0-cp38-abi3-win_amd64.whl", hash = "sha256:aeb6bd20b06dfc92cfe4436939c18de88a58c640752cf7f30d9e4ae893cdec32", size = 3149967, upload-time = "2025-04-17T11:41:07.067Z" },
-]
-
-[[package]]
-name = "propcache"
-version = "0.3.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" },
- { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" },
- { url = "https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" },
- { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" },
- { url = "https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" },
- { url = "https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" },
- { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" },
- { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = "2025-06-09T22:54:41.565Z" },
- { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" },
- { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" },
- { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" },
- { url = "https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" },
- { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" },
- { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" },
- { url = "https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" },
- { url = "https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" },
- { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" },
- { url = "https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" },
- { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" },
- { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" },
- { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" },
- { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" },
- { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" },
- { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" },
- { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" },
- { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" },
- { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" },
- { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" },
- { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" },
- { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" },
- { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" },
- { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" },
- { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" },
- { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" },
- { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" },
- { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" },
- { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" },
- { url = "https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" },
- { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" },
- { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" },
- { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" },
- { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" },
- { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" },
- { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" },
- { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" },
- { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" },
- { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" },
- { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" },
- { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" },
-]
-
-[[package]]
-name = "proto-plus"
-version = "1.26.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "protobuf" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f4/ac/87285f15f7cce6d4a008f33f1757fb5a13611ea8914eb58c3d0d26243468/proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012", size = 56142, upload-time = "2025-03-10T15:54:38.843Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/4e/6d/280c4c2ce28b1593a19ad5239c8b826871fc6ec275c21afc8e1820108039/proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66", size = 50163, upload-time = "2025-03-10T15:54:37.335Z" },
-]
-
-[[package]]
-name = "protobuf"
-version = "6.32.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/c0/df/fb4a8eeea482eca989b51cffd274aac2ee24e825f0bf3cbce5281fa1567b/protobuf-6.32.0.tar.gz", hash = "sha256:a81439049127067fc49ec1d36e25c6ee1d1a2b7be930675f919258d03c04e7d2", size = 440614, upload-time = "2025-08-14T21:21:25.015Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/33/18/df8c87da2e47f4f1dcc5153a81cd6bca4e429803f4069a299e236e4dd510/protobuf-6.32.0-cp310-abi3-win32.whl", hash = "sha256:84f9e3c1ff6fb0308dbacb0950d8aa90694b0d0ee68e75719cb044b7078fe741", size = 424409, upload-time = "2025-08-14T21:21:12.366Z" },
- { url = "https://files.pythonhosted.org/packages/e1/59/0a820b7310f8139bd8d5a9388e6a38e1786d179d6f33998448609296c229/protobuf-6.32.0-cp310-abi3-win_amd64.whl", hash = "sha256:a8bdbb2f009cfc22a36d031f22a625a38b615b5e19e558a7b756b3279723e68e", size = 435735, upload-time = "2025-08-14T21:21:15.046Z" },
- { url = "https://files.pythonhosted.org/packages/cc/5b/0d421533c59c789e9c9894683efac582c06246bf24bb26b753b149bd88e4/protobuf-6.32.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d52691e5bee6c860fff9a1c86ad26a13afbeb4b168cd4445c922b7e2cf85aaf0", size = 426449, upload-time = "2025-08-14T21:21:16.687Z" },
- { url = "https://files.pythonhosted.org/packages/ec/7b/607764ebe6c7a23dcee06e054fd1de3d5841b7648a90fd6def9a3bb58c5e/protobuf-6.32.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:501fe6372fd1c8ea2a30b4d9be8f87955a64d6be9c88a973996cef5ef6f0abf1", size = 322869, upload-time = "2025-08-14T21:21:18.282Z" },
- { url = "https://files.pythonhosted.org/packages/40/01/2e730bd1c25392fc32e3268e02446f0d77cb51a2c3a8486b1798e34d5805/protobuf-6.32.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:75a2aab2bd1aeb1f5dc7c5f33bcb11d82ea8c055c9becbb41c26a8c43fd7092c", size = 322009, upload-time = "2025-08-14T21:21:19.893Z" },
- { url = "https://files.pythonhosted.org/packages/9c/f2/80ffc4677aac1bc3519b26bc7f7f5de7fce0ee2f7e36e59e27d8beb32dd1/protobuf-6.32.0-py3-none-any.whl", hash = "sha256:ba377e5b67b908c8f3072a57b63e2c6a4cbd18aea4ed98d2584350dbf46f2783", size = 169287, upload-time = "2025-08-14T21:21:23.515Z" },
-]
-
-[[package]]
-name = "pyasn1"
-version = "0.6.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" },
-]
-
-[[package]]
-name = "pyasn1-modules"
-version = "0.4.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pyasn1" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" },
-]
-
-[[package]]
-name = "pycparser"
-version = "2.23"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" },
-]
-
-[[package]]
-name = "pycryptodome"
-version = "3.23.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/8e/a6/8452177684d5e906854776276ddd34eca30d1b1e15aa1ee9cefc289a33f5/pycryptodome-3.23.0.tar.gz", hash = "sha256:447700a657182d60338bab09fdb27518f8856aecd80ae4c6bdddb67ff5da44ef", size = 4921276, upload-time = "2025-05-17T17:21:45.242Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/04/5d/bdb09489b63cd34a976cc9e2a8d938114f7a53a74d3dd4f125ffa49dce82/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:0011f7f00cdb74879142011f95133274741778abba114ceca229adbf8e62c3e4", size = 2495152, upload-time = "2025-05-17T17:20:20.833Z" },
- { url = "https://files.pythonhosted.org/packages/a7/ce/7840250ed4cc0039c433cd41715536f926d6e86ce84e904068eb3244b6a6/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:90460fc9e088ce095f9ee8356722d4f10f86e5be06e2354230a9880b9c549aae", size = 1639348, upload-time = "2025-05-17T17:20:23.171Z" },
- { url = "https://files.pythonhosted.org/packages/ee/f0/991da24c55c1f688d6a3b5a11940567353f74590734ee4a64294834ae472/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4764e64b269fc83b00f682c47443c2e6e85b18273712b98aa43bcb77f8570477", size = 2184033, upload-time = "2025-05-17T17:20:25.424Z" },
- { url = "https://files.pythonhosted.org/packages/54/16/0e11882deddf00f68b68dd4e8e442ddc30641f31afeb2bc25588124ac8de/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8f24adb74984aa0e5d07a2368ad95276cf38051fe2dc6605cbcf482e04f2a7", size = 2270142, upload-time = "2025-05-17T17:20:27.808Z" },
- { url = "https://files.pythonhosted.org/packages/d5/fc/4347fea23a3f95ffb931f383ff28b3f7b1fe868739182cb76718c0da86a1/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d97618c9c6684a97ef7637ba43bdf6663a2e2e77efe0f863cce97a76af396446", size = 2309384, upload-time = "2025-05-17T17:20:30.765Z" },
- { url = "https://files.pythonhosted.org/packages/6e/d9/c5261780b69ce66d8cfab25d2797bd6e82ba0241804694cd48be41add5eb/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a53a4fe5cb075075d515797d6ce2f56772ea7e6a1e5e4b96cf78a14bac3d265", size = 2183237, upload-time = "2025-05-17T17:20:33.736Z" },
- { url = "https://files.pythonhosted.org/packages/5a/6f/3af2ffedd5cfa08c631f89452c6648c4d779e7772dfc388c77c920ca6bbf/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:763d1d74f56f031788e5d307029caef067febf890cd1f8bf61183ae142f1a77b", size = 2343898, upload-time = "2025-05-17T17:20:36.086Z" },
- { url = "https://files.pythonhosted.org/packages/9a/dc/9060d807039ee5de6e2f260f72f3d70ac213993a804f5e67e0a73a56dd2f/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:954af0e2bd7cea83ce72243b14e4fb518b18f0c1649b576d114973e2073b273d", size = 2269197, upload-time = "2025-05-17T17:20:38.414Z" },
- { url = "https://files.pythonhosted.org/packages/f9/34/e6c8ca177cb29dcc4967fef73f5de445912f93bd0343c9c33c8e5bf8cde8/pycryptodome-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:257bb3572c63ad8ba40b89f6fc9d63a2a628e9f9708d31ee26560925ebe0210a", size = 1768600, upload-time = "2025-05-17T17:20:40.688Z" },
- { url = "https://files.pythonhosted.org/packages/e4/1d/89756b8d7ff623ad0160f4539da571d1f594d21ee6d68be130a6eccb39a4/pycryptodome-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6501790c5b62a29fcb227bd6b62012181d886a767ce9ed03b303d1f22eb5c625", size = 1799740, upload-time = "2025-05-17T17:20:42.413Z" },
- { url = "https://files.pythonhosted.org/packages/5d/61/35a64f0feaea9fd07f0d91209e7be91726eb48c0f1bfc6720647194071e4/pycryptodome-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9a77627a330ab23ca43b48b130e202582e91cc69619947840ea4d2d1be21eb39", size = 1703685, upload-time = "2025-05-17T17:20:44.388Z" },
- { url = "https://files.pythonhosted.org/packages/db/6c/a1f71542c969912bb0e106f64f60a56cc1f0fabecf9396f45accbe63fa68/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:187058ab80b3281b1de11c2e6842a357a1f71b42cb1e15bce373f3d238135c27", size = 2495627, upload-time = "2025-05-17T17:20:47.139Z" },
- { url = "https://files.pythonhosted.org/packages/6e/4e/a066527e079fc5002390c8acdd3aca431e6ea0a50ffd7201551175b47323/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:cfb5cd445280c5b0a4e6187a7ce8de5a07b5f3f897f235caa11f1f435f182843", size = 1640362, upload-time = "2025-05-17T17:20:50.392Z" },
- { url = "https://files.pythonhosted.org/packages/50/52/adaf4c8c100a8c49d2bd058e5b551f73dfd8cb89eb4911e25a0c469b6b4e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67bd81fcbe34f43ad9422ee8fd4843c8e7198dd88dd3d40e6de42ee65fbe1490", size = 2182625, upload-time = "2025-05-17T17:20:52.866Z" },
- { url = "https://files.pythonhosted.org/packages/5f/e9/a09476d436d0ff1402ac3867d933c61805ec2326c6ea557aeeac3825604e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8987bd3307a39bc03df5c8e0e3d8be0c4c3518b7f044b0f4c15d1aa78f52575", size = 2268954, upload-time = "2025-05-17T17:20:55.027Z" },
- { url = "https://files.pythonhosted.org/packages/f9/c5/ffe6474e0c551d54cab931918127c46d70cab8f114e0c2b5a3c071c2f484/pycryptodome-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0698f65e5b570426fc31b8162ed4603b0c2841cbb9088e2b01641e3065915b", size = 2308534, upload-time = "2025-05-17T17:20:57.279Z" },
- { url = "https://files.pythonhosted.org/packages/18/28/e199677fc15ecf43010f2463fde4c1a53015d1fe95fb03bca2890836603a/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:53ecbafc2b55353edcebd64bf5da94a2a2cdf5090a6915bcca6eca6cc452585a", size = 2181853, upload-time = "2025-05-17T17:20:59.322Z" },
- { url = "https://files.pythonhosted.org/packages/ce/ea/4fdb09f2165ce1365c9eaefef36625583371ee514db58dc9b65d3a255c4c/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:156df9667ad9f2ad26255926524e1c136d6664b741547deb0a86a9acf5ea631f", size = 2342465, upload-time = "2025-05-17T17:21:03.83Z" },
- { url = "https://files.pythonhosted.org/packages/22/82/6edc3fc42fe9284aead511394bac167693fb2b0e0395b28b8bedaa07ef04/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:dea827b4d55ee390dc89b2afe5927d4308a8b538ae91d9c6f7a5090f397af1aa", size = 2267414, upload-time = "2025-05-17T17:21:06.72Z" },
- { url = "https://files.pythonhosted.org/packages/59/fe/aae679b64363eb78326c7fdc9d06ec3de18bac68be4b612fc1fe8902693c/pycryptodome-3.23.0-cp37-abi3-win32.whl", hash = "sha256:507dbead45474b62b2bbe318eb1c4c8ee641077532067fec9c1aa82c31f84886", size = 1768484, upload-time = "2025-05-17T17:21:08.535Z" },
- { url = "https://files.pythonhosted.org/packages/54/2f/e97a1b8294db0daaa87012c24a7bb714147c7ade7656973fd6c736b484ff/pycryptodome-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:c75b52aacc6c0c260f204cbdd834f76edc9fb0d8e0da9fbf8352ef58202564e2", size = 1799636, upload-time = "2025-05-17T17:21:10.393Z" },
- { url = "https://files.pythonhosted.org/packages/18/3d/f9441a0d798bf2b1e645adc3265e55706aead1255ccdad3856dbdcffec14/pycryptodome-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:11eeeb6917903876f134b56ba11abe95c0b0fd5e3330def218083c7d98bbcb3c", size = 1703675, upload-time = "2025-05-17T17:21:13.146Z" },
-]
-
-[[package]]
-name = "pydantic"
-version = "2.11.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "annotated-types" },
- { name = "pydantic-core" },
- { name = "typing-extensions" },
- { name = "typing-inspection" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" },
-]
-
-[[package]]
-name = "pydantic-core"
-version = "2.33.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" },
- { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" },
- { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" },
- { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" },
- { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" },
- { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" },
- { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" },
- { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" },
- { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" },
- { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" },
- { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" },
- { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" },
- { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" },
- { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" },
- { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" },
- { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" },
- { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" },
- { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" },
- { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" },
- { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" },
- { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" },
- { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" },
- { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" },
- { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" },
- { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" },
- { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" },
- { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" },
- { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" },
- { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" },
- { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" },
- { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
-]
-
-[[package]]
-name = "pydantic-settings"
-version = "2.10.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pydantic" },
- { name = "python-dotenv" },
- { name = "typing-inspection" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/68/85/1ea668bbab3c50071ca613c6ab30047fb36ab0da1b92fa8f17bbc38fd36c/pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee", size = 172583, upload-time = "2025-06-24T13:26:46.841Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/58/f0/427018098906416f580e3cf1366d3b1abfb408a0652e9f31600c24a1903c/pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796", size = 45235, upload-time = "2025-06-24T13:26:45.485Z" },
-]
-
-[[package]]
-name = "pygithub"
-version = "2.6.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "deprecated" },
- { name = "pyjwt", extra = ["crypto"] },
- { name = "pynacl" },
- { name = "requests" },
- { name = "typing-extensions" },
- { name = "urllib3" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c0/88/e08ab18dc74b2916f48703ed1a797d57cb64eca0e23b0a9254e13cfe3911/pygithub-2.6.1.tar.gz", hash = "sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf", size = 3659473, upload-time = "2025-02-21T13:45:58.262Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ac/fc/a444cd19ccc8c4946a512f3827ed0b3565c88488719d800d54a75d541c0b/PyGithub-2.6.1-py3-none-any.whl", hash = "sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3", size = 410451, upload-time = "2025-02-21T13:45:55.519Z" },
-]
-
-[[package]]
-name = "pygments"
-version = "2.19.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
-]
-
-[[package]]
-name = "pyjwt"
-version = "2.10.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" },
-]
-
-[package.optional-dependencies]
-crypto = [
- { name = "cryptography" },
-]
-
-[[package]]
-name = "pynacl"
-version = "1.5.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "cffi" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a7/22/27582568be639dfe22ddb3902225f91f2f17ceff88ce80e4db396c8986da/PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba", size = 3392854, upload-time = "2022-01-07T22:05:41.134Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ce/75/0b8ede18506041c0bf23ac4d8e2971b4161cd6ce630b177d0a08eb0d8857/PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1", size = 349920, upload-time = "2022-01-07T22:05:49.156Z" },
- { url = "https://files.pythonhosted.org/packages/59/bb/fddf10acd09637327a97ef89d2a9d621328850a72f1fdc8c08bdf72e385f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92", size = 601722, upload-time = "2022-01-07T22:05:50.989Z" },
- { url = "https://files.pythonhosted.org/packages/5d/70/87a065c37cca41a75f2ce113a5a2c2aa7533be648b184ade58971b5f7ccc/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394", size = 680087, upload-time = "2022-01-07T22:05:52.539Z" },
- { url = "https://files.pythonhosted.org/packages/ee/87/f1bb6a595f14a327e8285b9eb54d41fef76c585a0edef0a45f6fc95de125/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d", size = 856678, upload-time = "2022-01-07T22:05:54.251Z" },
- { url = "https://files.pythonhosted.org/packages/66/28/ca86676b69bf9f90e710571b67450508484388bfce09acf8a46f0b8c785f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858", size = 1133660, upload-time = "2022-01-07T22:05:56.056Z" },
- { url = "https://files.pythonhosted.org/packages/3d/85/c262db650e86812585e2bc59e497a8f59948a005325a11bbbc9ecd3fe26b/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b", size = 663824, upload-time = "2022-01-07T22:05:57.434Z" },
- { url = "https://files.pythonhosted.org/packages/fd/1a/cc308a884bd299b651f1633acb978e8596c71c33ca85e9dc9fa33a5399b9/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff", size = 1117912, upload-time = "2022-01-07T22:05:58.665Z" },
- { url = "https://files.pythonhosted.org/packages/25/2d/b7df6ddb0c2a33afdb358f8af6ea3b8c4d1196ca45497dd37a56f0c122be/PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543", size = 204624, upload-time = "2022-01-07T22:06:00.085Z" },
- { url = "https://files.pythonhosted.org/packages/5e/22/d3db169895faaf3e2eda892f005f433a62db2decbcfbc2f61e6517adfa87/PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93", size = 212141, upload-time = "2022-01-07T22:06:01.861Z" },
-]
-
-[[package]]
-name = "pyparsing"
-version = "3.2.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/bb/22/f1129e69d94ffff626bdb5c835506b3a5b4f3d070f17ea295e12c2c6f60f/pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be", size = 1088608, upload-time = "2025-03-25T05:01:28.114Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/05/e7/df2285f3d08fee213f2d041540fa4fc9ca6c2d44cf36d3a035bf2a8d2bcc/pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf", size = 111120, upload-time = "2025-03-25T05:01:24.908Z" },
-]
-
-[[package]]
-name = "pypdf"
-version = "5.8.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/28/5a/139b1a3ec3789cc77a7cb9d5d3bc9e97e742e6d03708baeb7719f8ad0827/pypdf-5.8.0.tar.gz", hash = "sha256:f8332f80606913e6f0ce65488a870833c9d99ccdb988c17bb6c166f7c8e140cb", size = 5029494, upload-time = "2025-07-13T12:51:35.125Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8b/94/05d0310bfa92c26aa50a9d2dea2c6448a1febfdfcf98fb340a99d48a3078/pypdf-5.8.0-py3-none-any.whl", hash = "sha256:bfe861285cd2f79cceecefde2d46901e4ee992a9f4b42c56548c4a6e9236a0d1", size = 309718, upload-time = "2025-07-13T12:51:33.159Z" },
-]
-
-[[package]]
-name = "pytest"
-version = "8.4.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32'" },
- { name = "iniconfig" },
- { name = "packaging" },
- { name = "pluggy" },
- { name = "pygments" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" },
-]
-
-[[package]]
-name = "pytest-mock"
-version = "3.14.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pytest" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/71/28/67172c96ba684058a4d24ffe144d64783d2a270d0af0d9e792737bddc75c/pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e", size = 33241, upload-time = "2025-05-26T13:58:45.167Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" },
-]
-
-[[package]]
-name = "python-dateutil"
-version = "2.9.0.post0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "six" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
-]
-
-[[package]]
-name = "python-dotenv"
-version = "1.1.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" },
-]
-
-[[package]]
-name = "pytz"
-version = "2025.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
-]
-
-[[package]]
-name = "pyyaml"
-version = "6.0.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" },
- { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" },
- { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" },
- { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" },
- { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" },
- { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" },
- { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" },
- { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" },
- { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" },
- { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" },
- { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" },
- { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" },
- { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" },
- { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" },
- { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" },
- { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" },
- { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" },
- { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
-]
-
-[[package]]
-name = "regex"
-version = "2025.9.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b2/5a/4c63457fbcaf19d138d72b2e9b39405954f98c0349b31c601bfcb151582c/regex-2025.9.1.tar.gz", hash = "sha256:88ac07b38d20b54d79e704e38aa3bd2c0f8027432164226bdee201a1c0c9c9ff", size = 400852, upload-time = "2025-09-01T22:10:10.479Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/39/ef/a0372febc5a1d44c1be75f35d7e5aff40c659ecde864d7fa10e138f75e74/regex-2025.9.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:84a25164bd8dcfa9f11c53f561ae9766e506e580b70279d05a7946510bdd6f6a", size = 486317, upload-time = "2025-09-01T22:08:34.529Z" },
- { url = "https://files.pythonhosted.org/packages/b5/25/d64543fb7eb41a1024786d518cc57faf1ce64aa6e9ddba097675a0c2f1d2/regex-2025.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:645e88a73861c64c1af558dd12294fb4e67b5c1eae0096a60d7d8a2143a611c7", size = 289698, upload-time = "2025-09-01T22:08:36.162Z" },
- { url = "https://files.pythonhosted.org/packages/d8/dc/fbf31fc60be317bd9f6f87daa40a8a9669b3b392aa8fe4313df0a39d0722/regex-2025.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:10a450cba5cd5409526ee1d4449f42aad38dd83ac6948cbd6d7f71ca7018f7db", size = 287242, upload-time = "2025-09-01T22:08:37.794Z" },
- { url = "https://files.pythonhosted.org/packages/0f/74/f933a607a538f785da5021acf5323961b4620972e2c2f1f39b6af4b71db7/regex-2025.9.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9dc5991592933a4192c166eeb67b29d9234f9c86344481173d1bc52f73a7104", size = 797441, upload-time = "2025-09-01T22:08:39.108Z" },
- { url = "https://files.pythonhosted.org/packages/89/d0/71fc49b4f20e31e97f199348b8c4d6e613e7b6a54a90eb1b090c2b8496d7/regex-2025.9.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a32291add816961aab472f4fad344c92871a2ee33c6c219b6598e98c1f0108f2", size = 862654, upload-time = "2025-09-01T22:08:40.586Z" },
- { url = "https://files.pythonhosted.org/packages/59/05/984edce1411a5685ba9abbe10d42cdd9450aab4a022271f9585539788150/regex-2025.9.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:588c161a68a383478e27442a678e3b197b13c5ba51dbba40c1ccb8c4c7bee9e9", size = 910862, upload-time = "2025-09-01T22:08:42.416Z" },
- { url = "https://files.pythonhosted.org/packages/b2/02/5c891bb5fe0691cc1bad336e3a94b9097fbcf9707ec8ddc1dce9f0397289/regex-2025.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47829ffaf652f30d579534da9085fe30c171fa2a6744a93d52ef7195dc38218b", size = 801991, upload-time = "2025-09-01T22:08:44.072Z" },
- { url = "https://files.pythonhosted.org/packages/f1/ae/fd10d6ad179910f7a1b3e0a7fde1ef8bb65e738e8ac4fd6ecff3f52252e4/regex-2025.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e978e5a35b293ea43f140c92a3269b6ab13fe0a2bf8a881f7ac740f5a6ade85", size = 786651, upload-time = "2025-09-01T22:08:46.079Z" },
- { url = "https://files.pythonhosted.org/packages/30/cf/9d686b07bbc5bf94c879cc168db92542d6bc9fb67088d03479fef09ba9d3/regex-2025.9.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4cf09903e72411f4bf3ac1eddd624ecfd423f14b2e4bf1c8b547b72f248b7bf7", size = 856556, upload-time = "2025-09-01T22:08:48.376Z" },
- { url = "https://files.pythonhosted.org/packages/91/9d/302f8a29bb8a49528abbab2d357a793e2a59b645c54deae0050f8474785b/regex-2025.9.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d016b0f77be63e49613c9e26aaf4a242f196cd3d7a4f15898f5f0ab55c9b24d2", size = 849001, upload-time = "2025-09-01T22:08:50.067Z" },
- { url = "https://files.pythonhosted.org/packages/93/fa/b4c6dbdedc85ef4caec54c817cd5f4418dbfa2453214119f2538082bf666/regex-2025.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:656563e620de6908cd1c9d4f7b9e0777e3341ca7db9d4383bcaa44709c90281e", size = 788138, upload-time = "2025-09-01T22:08:51.933Z" },
- { url = "https://files.pythonhosted.org/packages/4a/1b/91ee17a3cbf87f81e8c110399279d0e57f33405468f6e70809100f2ff7d8/regex-2025.9.1-cp312-cp312-win32.whl", hash = "sha256:df33f4ef07b68f7ab637b1dbd70accbf42ef0021c201660656601e8a9835de45", size = 264524, upload-time = "2025-09-01T22:08:53.75Z" },
- { url = "https://files.pythonhosted.org/packages/92/28/6ba31cce05b0f1ec6b787921903f83bd0acf8efde55219435572af83c350/regex-2025.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:5aba22dfbc60cda7c0853516104724dc904caa2db55f2c3e6e984eb858d3edf3", size = 275489, upload-time = "2025-09-01T22:08:55.037Z" },
- { url = "https://files.pythonhosted.org/packages/bd/ed/ea49f324db00196e9ef7fe00dd13c6164d5173dd0f1bbe495e61bb1fb09d/regex-2025.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:ec1efb4c25e1849c2685fa95da44bfde1b28c62d356f9c8d861d4dad89ed56e9", size = 268589, upload-time = "2025-09-01T22:08:56.369Z" },
- { url = "https://files.pythonhosted.org/packages/98/25/b2959ce90c6138c5142fe5264ee1f9b71a0c502ca4c7959302a749407c79/regex-2025.9.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bc6834727d1b98d710a63e6c823edf6ffbf5792eba35d3fa119531349d4142ef", size = 485932, upload-time = "2025-09-01T22:08:57.913Z" },
- { url = "https://files.pythonhosted.org/packages/49/2e/6507a2a85f3f2be6643438b7bd976e67ad73223692d6988eb1ff444106d3/regex-2025.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c3dc05b6d579875719bccc5f3037b4dc80433d64e94681a0061845bd8863c025", size = 289568, upload-time = "2025-09-01T22:08:59.258Z" },
- { url = "https://files.pythonhosted.org/packages/c7/d8/de4a4b57215d99868f1640e062a7907e185ec7476b4b689e2345487c1ff4/regex-2025.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22213527df4c985ec4a729b055a8306272d41d2f45908d7bacb79be0fa7a75ad", size = 286984, upload-time = "2025-09-01T22:09:00.835Z" },
- { url = "https://files.pythonhosted.org/packages/03/15/e8cb403403a57ed316e80661db0e54d7aa2efcd85cb6156f33cc18746922/regex-2025.9.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8e3f6e3c5a5a1adc3f7ea1b5aec89abfc2f4fbfba55dafb4343cd1d084f715b2", size = 797514, upload-time = "2025-09-01T22:09:02.538Z" },
- { url = "https://files.pythonhosted.org/packages/e4/26/2446f2b9585fed61faaa7e2bbce3aca7dd8df6554c32addee4c4caecf24a/regex-2025.9.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bcb89c02a0d6c2bec9b0bb2d8c78782699afe8434493bfa6b4021cc51503f249", size = 862586, upload-time = "2025-09-01T22:09:04.322Z" },
- { url = "https://files.pythonhosted.org/packages/fd/b8/82ffbe9c0992c31bbe6ae1c4b4e21269a5df2559102b90543c9b56724c3c/regex-2025.9.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b0e2f95413eb0c651cd1516a670036315b91b71767af83bc8525350d4375ccba", size = 910815, upload-time = "2025-09-01T22:09:05.978Z" },
- { url = "https://files.pythonhosted.org/packages/2f/d8/7303ea38911759c1ee30cc5bc623ee85d3196b733c51fd6703c34290a8d9/regex-2025.9.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a41dc039e1c97d3c2ed3e26523f748e58c4de3ea7a31f95e1cf9ff973fff5a", size = 802042, upload-time = "2025-09-01T22:09:07.865Z" },
- { url = "https://files.pythonhosted.org/packages/fc/0e/6ad51a55ed4b5af512bb3299a05d33309bda1c1d1e1808fa869a0bed31bc/regex-2025.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f0b4258b161094f66857a26ee938d3fe7b8a5063861e44571215c44fbf0e5df", size = 786764, upload-time = "2025-09-01T22:09:09.362Z" },
- { url = "https://files.pythonhosted.org/packages/8d/d5/394e3ffae6baa5a9217bbd14d96e0e5da47bb069d0dbb8278e2681a2b938/regex-2025.9.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bf70e18ac390e6977ea7e56f921768002cb0fa359c4199606c7219854ae332e0", size = 856557, upload-time = "2025-09-01T22:09:11.129Z" },
- { url = "https://files.pythonhosted.org/packages/cd/80/b288d3910c41194ad081b9fb4b371b76b0bbfdce93e7709fc98df27b37dc/regex-2025.9.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b84036511e1d2bb0a4ff1aec26951caa2dea8772b223c9e8a19ed8885b32dbac", size = 849108, upload-time = "2025-09-01T22:09:12.877Z" },
- { url = "https://files.pythonhosted.org/packages/d1/cd/5ec76bf626d0d5abdc277b7a1734696f5f3d14fbb4a3e2540665bc305d85/regex-2025.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c2e05dcdfe224047f2a59e70408274c325d019aad96227ab959403ba7d58d2d7", size = 788201, upload-time = "2025-09-01T22:09:14.561Z" },
- { url = "https://files.pythonhosted.org/packages/b5/36/674672f3fdead107565a2499f3007788b878188acec6d42bc141c5366c2c/regex-2025.9.1-cp313-cp313-win32.whl", hash = "sha256:3b9a62107a7441b81ca98261808fed30ae36ba06c8b7ee435308806bd53c1ed8", size = 264508, upload-time = "2025-09-01T22:09:16.193Z" },
- { url = "https://files.pythonhosted.org/packages/83/ad/931134539515eb64ce36c24457a98b83c1b2e2d45adf3254b94df3735a76/regex-2025.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:b38afecc10c177eb34cfae68d669d5161880849ba70c05cbfbe409f08cc939d7", size = 275469, upload-time = "2025-09-01T22:09:17.462Z" },
- { url = "https://files.pythonhosted.org/packages/24/8c/96d34e61c0e4e9248836bf86d69cb224fd222f270fa9045b24e218b65604/regex-2025.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:ec329890ad5e7ed9fc292858554d28d58d56bf62cf964faf0aa57964b21155a0", size = 268586, upload-time = "2025-09-01T22:09:18.948Z" },
- { url = "https://files.pythonhosted.org/packages/21/b1/453cbea5323b049181ec6344a803777914074b9726c9c5dc76749966d12d/regex-2025.9.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:72fb7a016467d364546f22b5ae86c45680a4e0de6b2a6f67441d22172ff641f1", size = 486111, upload-time = "2025-09-01T22:09:20.734Z" },
- { url = "https://files.pythonhosted.org/packages/f6/0e/92577f197bd2f7652c5e2857f399936c1876978474ecc5b068c6d8a79c86/regex-2025.9.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c9527fa74eba53f98ad86be2ba003b3ebe97e94b6eb2b916b31b5f055622ef03", size = 289520, upload-time = "2025-09-01T22:09:22.249Z" },
- { url = "https://files.pythonhosted.org/packages/af/c6/b472398116cca7ea5a6c4d5ccd0fc543f7fd2492cb0c48d2852a11972f73/regex-2025.9.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c905d925d194c83a63f92422af7544ec188301451b292c8b487f0543726107ca", size = 287215, upload-time = "2025-09-01T22:09:23.657Z" },
- { url = "https://files.pythonhosted.org/packages/cf/11/f12ecb0cf9ca792a32bb92f758589a84149017467a544f2f6bfb45c0356d/regex-2025.9.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:74df7c74a63adcad314426b1f4ea6054a5ab25d05b0244f0c07ff9ce640fa597", size = 797855, upload-time = "2025-09-01T22:09:25.197Z" },
- { url = "https://files.pythonhosted.org/packages/46/88/bbb848f719a540fb5997e71310f16f0b33a92c5d4b4d72d4311487fff2a3/regex-2025.9.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4f6e935e98ea48c7a2e8be44494de337b57a204470e7f9c9c42f912c414cd6f5", size = 863363, upload-time = "2025-09-01T22:09:26.705Z" },
- { url = "https://files.pythonhosted.org/packages/54/a9/2321eb3e2838f575a78d48e03c1e83ea61bd08b74b7ebbdeca8abc50fc25/regex-2025.9.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4a62d033cd9ebefc7c5e466731a508dfabee827d80b13f455de68a50d3c2543d", size = 910202, upload-time = "2025-09-01T22:09:28.906Z" },
- { url = "https://files.pythonhosted.org/packages/33/07/d1d70835d7d11b7e126181f316f7213c4572ecf5c5c97bdbb969fb1f38a2/regex-2025.9.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef971ebf2b93bdc88d8337238be4dfb851cc97ed6808eb04870ef67589415171", size = 801808, upload-time = "2025-09-01T22:09:30.733Z" },
- { url = "https://files.pythonhosted.org/packages/13/d1/29e4d1bed514ef2bf3a4ead3cb8bb88ca8af94130239a4e68aa765c35b1c/regex-2025.9.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d936a1db208bdca0eca1f2bb2c1ba1d8370b226785c1e6db76e32a228ffd0ad5", size = 786824, upload-time = "2025-09-01T22:09:32.61Z" },
- { url = "https://files.pythonhosted.org/packages/33/27/20d8ccb1bee460faaa851e6e7cc4cfe852a42b70caa1dca22721ba19f02f/regex-2025.9.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:7e786d9e4469698fc63815b8de08a89165a0aa851720eb99f5e0ea9d51dd2b6a", size = 857406, upload-time = "2025-09-01T22:09:34.117Z" },
- { url = "https://files.pythonhosted.org/packages/74/fe/60c6132262dc36430d51e0c46c49927d113d3a38c1aba6a26c7744c84cf3/regex-2025.9.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:6b81d7dbc5466ad2c57ce3a0ddb717858fe1a29535c8866f8514d785fdb9fc5b", size = 848593, upload-time = "2025-09-01T22:09:35.598Z" },
- { url = "https://files.pythonhosted.org/packages/cc/ae/2d4ff915622fabbef1af28387bf71e7f2f4944a348b8460d061e85e29bf0/regex-2025.9.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:cd4890e184a6feb0ef195338a6ce68906a8903a0f2eb7e0ab727dbc0a3156273", size = 787951, upload-time = "2025-09-01T22:09:37.139Z" },
- { url = "https://files.pythonhosted.org/packages/85/37/dc127703a9e715a284cc2f7dbdd8a9776fd813c85c126eddbcbdd1ca5fec/regex-2025.9.1-cp314-cp314-win32.whl", hash = "sha256:34679a86230e46164c9e0396b56cab13c0505972343880b9e705083cc5b8ec86", size = 269833, upload-time = "2025-09-01T22:09:39.245Z" },
- { url = "https://files.pythonhosted.org/packages/83/bf/4bed4d3d0570e16771defd5f8f15f7ea2311edcbe91077436d6908956c4a/regex-2025.9.1-cp314-cp314-win_amd64.whl", hash = "sha256:a1196e530a6bfa5f4bde029ac5b0295a6ecfaaffbfffede4bbaf4061d9455b70", size = 278742, upload-time = "2025-09-01T22:09:40.651Z" },
- { url = "https://files.pythonhosted.org/packages/cf/3e/7d7ac6fd085023312421e0d69dfabdfb28e116e513fadbe9afe710c01893/regex-2025.9.1-cp314-cp314-win_arm64.whl", hash = "sha256:f46d525934871ea772930e997d577d48c6983e50f206ff7b66d4ac5f8941e993", size = 271860, upload-time = "2025-09-01T22:09:42.413Z" },
-]
-
-[[package]]
-name = "requests"
-version = "2.32.4"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "certifi" },
- { name = "charset-normalizer" },
- { name = "idna" },
- { name = "urllib3" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" },
-]
-
-[[package]]
-name = "requests-oauthlib"
-version = "2.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "oauthlib" },
- { name = "requests" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, upload-time = "2024-03-22T20:32:29.939Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" },
-]
-
-[[package]]
-name = "requests-toolbelt"
-version = "1.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "requests" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" },
-]
-
-[[package]]
-name = "rich"
-version = "14.1.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markdown-it-py" },
- { name = "pygments" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/fe/75/af448d8e52bf1d8fa6a9d089ca6c07ff4453d86c65c145d0a300bb073b9b/rich-14.1.0.tar.gz", hash = "sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8", size = 224441, upload-time = "2025-07-25T07:32:58.125Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e3/30/3c4d035596d3cf444529e0b2953ad0466f6049528a879d27534700580395/rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f", size = 243368, upload-time = "2025-07-25T07:32:56.73Z" },
-]
-
-[[package]]
-name = "rlp"
-version = "4.1.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "eth-utils" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/1b/2d/439b0728a92964a04d9c88ea1ca9ebb128893fbbd5834faa31f987f2fd4c/rlp-4.1.0.tar.gz", hash = "sha256:be07564270a96f3e225e2c107db263de96b5bc1f27722d2855bd3459a08e95a9", size = 33429, upload-time = "2025-02-04T22:05:59.089Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/99/fb/e4c0ced9893b84ac95b7181d69a9786ce5879aeb3bbbcbba80a164f85d6a/rlp-4.1.0-py3-none-any.whl", hash = "sha256:8eca394c579bad34ee0b937aecb96a57052ff3716e19c7a578883e767bc5da6f", size = 19973, upload-time = "2025-02-04T22:05:57.05Z" },
-]
-
-[[package]]
-name = "rsa"
-version = "4.9.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pyasn1" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" },
-]
-
-[[package]]
-name = "ruff"
-version = "0.12.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/9b/ce/8d7dbedede481245b489b769d27e2934730791a9a82765cb94566c6e6abd/ruff-0.12.4.tar.gz", hash = "sha256:13efa16df6c6eeb7d0f091abae50f58e9522f3843edb40d56ad52a5a4a4b6873", size = 5131435, upload-time = "2025-07-17T17:27:19.138Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ae/9f/517bc5f61bad205b7f36684ffa5415c013862dee02f55f38a217bdbe7aa4/ruff-0.12.4-py3-none-linux_armv6l.whl", hash = "sha256:cb0d261dac457ab939aeb247e804125a5d521b21adf27e721895b0d3f83a0d0a", size = 10188824, upload-time = "2025-07-17T17:26:31.412Z" },
- { url = "https://files.pythonhosted.org/packages/28/83/691baae5a11fbbde91df01c565c650fd17b0eabed259e8b7563de17c6529/ruff-0.12.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:55c0f4ca9769408d9b9bac530c30d3e66490bd2beb2d3dae3e4128a1f05c7442", size = 10884521, upload-time = "2025-07-17T17:26:35.084Z" },
- { url = "https://files.pythonhosted.org/packages/d6/8d/756d780ff4076e6dd035d058fa220345f8c458391f7edfb1c10731eedc75/ruff-0.12.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a8224cc3722c9ad9044da7f89c4c1ec452aef2cfe3904365025dd2f51daeae0e", size = 10277653, upload-time = "2025-07-17T17:26:37.897Z" },
- { url = "https://files.pythonhosted.org/packages/8d/97/8eeee0f48ece153206dce730fc9e0e0ca54fd7f261bb3d99c0a4343a1892/ruff-0.12.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9949d01d64fa3672449a51ddb5d7548b33e130240ad418884ee6efa7a229586", size = 10485993, upload-time = "2025-07-17T17:26:40.68Z" },
- { url = "https://files.pythonhosted.org/packages/49/b8/22a43d23a1f68df9b88f952616c8508ea6ce4ed4f15353b8168c48b2d7e7/ruff-0.12.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:be0593c69df9ad1465e8a2d10e3defd111fdb62dcd5be23ae2c06da77e8fcffb", size = 10022824, upload-time = "2025-07-17T17:26:43.564Z" },
- { url = "https://files.pythonhosted.org/packages/cd/70/37c234c220366993e8cffcbd6cadbf332bfc848cbd6f45b02bade17e0149/ruff-0.12.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7dea966bcb55d4ecc4cc3270bccb6f87a337326c9dcd3c07d5b97000dbff41c", size = 11524414, upload-time = "2025-07-17T17:26:46.219Z" },
- { url = "https://files.pythonhosted.org/packages/14/77/c30f9964f481b5e0e29dd6a1fae1f769ac3fd468eb76fdd5661936edd262/ruff-0.12.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:afcfa3ab5ab5dd0e1c39bf286d829e042a15e966b3726eea79528e2e24d8371a", size = 12419216, upload-time = "2025-07-17T17:26:48.883Z" },
- { url = "https://files.pythonhosted.org/packages/6e/79/af7fe0a4202dce4ef62c5e33fecbed07f0178f5b4dd9c0d2fcff5ab4a47c/ruff-0.12.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c057ce464b1413c926cdb203a0f858cd52f3e73dcb3270a3318d1630f6395bb3", size = 11976756, upload-time = "2025-07-17T17:26:51.754Z" },
- { url = "https://files.pythonhosted.org/packages/09/d1/33fb1fc00e20a939c305dbe2f80df7c28ba9193f7a85470b982815a2dc6a/ruff-0.12.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e64b90d1122dc2713330350626b10d60818930819623abbb56535c6466cce045", size = 11020019, upload-time = "2025-07-17T17:26:54.265Z" },
- { url = "https://files.pythonhosted.org/packages/64/f4/e3cd7f7bda646526f09693e2e02bd83d85fff8a8222c52cf9681c0d30843/ruff-0.12.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2abc48f3d9667fdc74022380b5c745873499ff827393a636f7a59da1515e7c57", size = 11277890, upload-time = "2025-07-17T17:26:56.914Z" },
- { url = "https://files.pythonhosted.org/packages/5e/d0/69a85fb8b94501ff1a4f95b7591505e8983f38823da6941eb5b6badb1e3a/ruff-0.12.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2b2449dc0c138d877d629bea151bee8c0ae3b8e9c43f5fcaafcd0c0d0726b184", size = 10348539, upload-time = "2025-07-17T17:26:59.381Z" },
- { url = "https://files.pythonhosted.org/packages/16/a0/91372d1cb1678f7d42d4893b88c252b01ff1dffcad09ae0c51aa2542275f/ruff-0.12.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:56e45bb11f625db55f9b70477062e6a1a04d53628eda7784dce6e0f55fd549eb", size = 10009579, upload-time = "2025-07-17T17:27:02.462Z" },
- { url = "https://files.pythonhosted.org/packages/23/1b/c4a833e3114d2cc0f677e58f1df6c3b20f62328dbfa710b87a1636a5e8eb/ruff-0.12.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:478fccdb82ca148a98a9ff43658944f7ab5ec41c3c49d77cd99d44da019371a1", size = 10942982, upload-time = "2025-07-17T17:27:05.343Z" },
- { url = "https://files.pythonhosted.org/packages/ff/ce/ce85e445cf0a5dd8842f2f0c6f0018eedb164a92bdf3eda51984ffd4d989/ruff-0.12.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0fc426bec2e4e5f4c4f182b9d2ce6a75c85ba9bcdbe5c6f2a74fcb8df437df4b", size = 11343331, upload-time = "2025-07-17T17:27:08.652Z" },
- { url = "https://files.pythonhosted.org/packages/35/cf/441b7fc58368455233cfb5b77206c849b6dfb48b23de532adcc2e50ccc06/ruff-0.12.4-py3-none-win32.whl", hash = "sha256:4de27977827893cdfb1211d42d84bc180fceb7b72471104671c59be37041cf93", size = 10267904, upload-time = "2025-07-17T17:27:11.814Z" },
- { url = "https://files.pythonhosted.org/packages/ce/7e/20af4a0df5e1299e7368d5ea4350412226afb03d95507faae94c80f00afd/ruff-0.12.4-py3-none-win_amd64.whl", hash = "sha256:fe0b9e9eb23736b453143d72d2ceca5db323963330d5b7859d60d101147d461a", size = 11209038, upload-time = "2025-07-17T17:27:14.417Z" },
- { url = "https://files.pythonhosted.org/packages/11/02/8857d0dfb8f44ef299a5dfd898f673edefb71e3b533b3b9d2db4c832dd13/ruff-0.12.4-py3-none-win_arm64.whl", hash = "sha256:0618ec4442a83ab545e5b71202a5c0ed7791e8471435b94e655b570a5031a98e", size = 10469336, upload-time = "2025-07-17T17:27:16.913Z" },
-]
-
-[[package]]
-name = "shellingham"
-version = "1.5.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
-]
-
-[[package]]
-name = "six"
-version = "1.17.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
-]
-
-[[package]]
-name = "sniffio"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
-]
-
-[[package]]
-name = "soupsieve"
-version = "2.8"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" },
-]
-
-[[package]]
-name = "sqlalchemy"
-version = "2.0.43"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "greenlet", marker = "(python_full_version < '3.14' and platform_machine == 'AMD64') or (python_full_version < '3.14' and platform_machine == 'WIN32') or (python_full_version < '3.14' and platform_machine == 'aarch64') or (python_full_version < '3.14' and platform_machine == 'amd64') or (python_full_version < '3.14' and platform_machine == 'ppc64le') or (python_full_version < '3.14' and platform_machine == 'win32') or (python_full_version < '3.14' and platform_machine == 'x86_64')" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/d7/bc/d59b5d97d27229b0e009bd9098cd81af71c2fa5549c580a0a67b9bed0496/sqlalchemy-2.0.43.tar.gz", hash = "sha256:788bfcef6787a7764169cfe9859fe425bf44559619e1d9f56f5bddf2ebf6f417", size = 9762949, upload-time = "2025-08-11T14:24:58.438Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/61/db/20c78f1081446095450bdc6ee6cc10045fce67a8e003a5876b6eaafc5cc4/sqlalchemy-2.0.43-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:20d81fc2736509d7a2bd33292e489b056cbae543661bb7de7ce9f1c0cd6e7f24", size = 2134891, upload-time = "2025-08-11T15:51:13.019Z" },
- { url = "https://files.pythonhosted.org/packages/45/0a/3d89034ae62b200b4396f0f95319f7d86e9945ee64d2343dcad857150fa2/sqlalchemy-2.0.43-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b9fc27650ff5a2c9d490c13c14906b918b0de1f8fcbb4c992712d8caf40e83", size = 2123061, upload-time = "2025-08-11T15:51:14.319Z" },
- { url = "https://files.pythonhosted.org/packages/cb/10/2711f7ff1805919221ad5bee205971254845c069ee2e7036847103ca1e4c/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6772e3ca8a43a65a37c88e2f3e2adfd511b0b1da37ef11ed78dea16aeae85bd9", size = 3320384, upload-time = "2025-08-11T15:52:35.088Z" },
- { url = "https://files.pythonhosted.org/packages/6e/0e/3d155e264d2ed2778484006ef04647bc63f55b3e2d12e6a4f787747b5900/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a113da919c25f7f641ffbd07fbc9077abd4b3b75097c888ab818f962707eb48", size = 3329648, upload-time = "2025-08-11T15:56:34.153Z" },
- { url = "https://files.pythonhosted.org/packages/5b/81/635100fb19725c931622c673900da5efb1595c96ff5b441e07e3dd61f2be/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4286a1139f14b7d70141c67a8ae1582fc2b69105f1b09d9573494eb4bb4b2687", size = 3258030, upload-time = "2025-08-11T15:52:36.933Z" },
- { url = "https://files.pythonhosted.org/packages/0c/ed/a99302716d62b4965fded12520c1cbb189f99b17a6d8cf77611d21442e47/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:529064085be2f4d8a6e5fab12d36ad44f1909a18848fcfbdb59cc6d4bbe48efe", size = 3294469, upload-time = "2025-08-11T15:56:35.553Z" },
- { url = "https://files.pythonhosted.org/packages/5d/a2/3a11b06715149bf3310b55a98b5c1e84a42cfb949a7b800bc75cb4e33abc/sqlalchemy-2.0.43-cp312-cp312-win32.whl", hash = "sha256:b535d35dea8bbb8195e7e2b40059e2253acb2b7579b73c1b432a35363694641d", size = 2098906, upload-time = "2025-08-11T15:55:00.645Z" },
- { url = "https://files.pythonhosted.org/packages/bc/09/405c915a974814b90aa591280623adc6ad6b322f61fd5cff80aeaef216c9/sqlalchemy-2.0.43-cp312-cp312-win_amd64.whl", hash = "sha256:1c6d85327ca688dbae7e2b06d7d84cfe4f3fffa5b5f9e21bb6ce9d0e1a0e0e0a", size = 2126260, upload-time = "2025-08-11T15:55:02.965Z" },
- { url = "https://files.pythonhosted.org/packages/41/1c/a7260bd47a6fae7e03768bf66451437b36451143f36b285522b865987ced/sqlalchemy-2.0.43-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e7c08f57f75a2bb62d7ee80a89686a5e5669f199235c6d1dac75cd59374091c3", size = 2130598, upload-time = "2025-08-11T15:51:15.903Z" },
- { url = "https://files.pythonhosted.org/packages/8e/84/8a337454e82388283830b3586ad7847aa9c76fdd4f1df09cdd1f94591873/sqlalchemy-2.0.43-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:14111d22c29efad445cd5021a70a8b42f7d9152d8ba7f73304c4d82460946aaa", size = 2118415, upload-time = "2025-08-11T15:51:17.256Z" },
- { url = "https://files.pythonhosted.org/packages/cf/ff/22ab2328148492c4d71899d62a0e65370ea66c877aea017a244a35733685/sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21b27b56eb2f82653168cefe6cb8e970cdaf4f3a6cb2c5e3c3c1cf3158968ff9", size = 3248707, upload-time = "2025-08-11T15:52:38.444Z" },
- { url = "https://files.pythonhosted.org/packages/dc/29/11ae2c2b981de60187f7cbc84277d9d21f101093d1b2e945c63774477aba/sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c5a9da957c56e43d72126a3f5845603da00e0293720b03bde0aacffcf2dc04f", size = 3253602, upload-time = "2025-08-11T15:56:37.348Z" },
- { url = "https://files.pythonhosted.org/packages/b8/61/987b6c23b12c56d2be451bc70900f67dd7d989d52b1ee64f239cf19aec69/sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d79f9fdc9584ec83d1b3c75e9f4595c49017f5594fee1a2217117647225d738", size = 3183248, upload-time = "2025-08-11T15:52:39.865Z" },
- { url = "https://files.pythonhosted.org/packages/86/85/29d216002d4593c2ce1c0ec2cec46dda77bfbcd221e24caa6e85eff53d89/sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9df7126fd9db49e3a5a3999442cc67e9ee8971f3cb9644250107d7296cb2a164", size = 3219363, upload-time = "2025-08-11T15:56:39.11Z" },
- { url = "https://files.pythonhosted.org/packages/b6/e4/bd78b01919c524f190b4905d47e7630bf4130b9f48fd971ae1c6225b6f6a/sqlalchemy-2.0.43-cp313-cp313-win32.whl", hash = "sha256:7f1ac7828857fcedb0361b48b9ac4821469f7694089d15550bbcf9ab22564a1d", size = 2096718, upload-time = "2025-08-11T15:55:05.349Z" },
- { url = "https://files.pythonhosted.org/packages/ac/a5/ca2f07a2a201f9497de1928f787926613db6307992fe5cda97624eb07c2f/sqlalchemy-2.0.43-cp313-cp313-win_amd64.whl", hash = "sha256:971ba928fcde01869361f504fcff3b7143b47d30de188b11c6357c0505824197", size = 2123200, upload-time = "2025-08-11T15:55:07.932Z" },
- { url = "https://files.pythonhosted.org/packages/b8/d9/13bdde6521f322861fab67473cec4b1cc8999f3871953531cf61945fad92/sqlalchemy-2.0.43-py3-none-any.whl", hash = "sha256:1681c21dd2ccee222c2fe0bef671d1aef7c504087c9c4e800371cfcc8ac966fc", size = 1924759, upload-time = "2025-08-11T15:39:53.024Z" },
-]
-
-[[package]]
-name = "starlette"
-version = "0.41.3"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anyio" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/1a/4c/9b5764bd22eec91c4039ef4c55334e9187085da2d8a2df7bd570869aae18/starlette-0.41.3.tar.gz", hash = "sha256:0e4ab3d16522a255be6b28260b938eae2482f98ce5cc934cb08dce8dc3ba5835", size = 2574159, upload-time = "2024-11-18T19:45:04.283Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/96/00/2b325970b3060c7cecebab6d295afe763365822b1306a12eeab198f74323/starlette-0.41.3-py3-none-any.whl", hash = "sha256:44cedb2b7c77a9de33a8b74b2b90e9f50d11fcf25d8270ea525ad71a25374ff7", size = 73225, upload-time = "2024-11-18T19:45:02.027Z" },
-]
-
-[[package]]
-name = "talos"
-version = "0.1.0"
-source = { editable = "." }
-dependencies = [
- { name = "alembic" },
- { name = "apscheduler" },
- { name = "beautifulsoup4" },
- { name = "duckduckgo-search" },
- { name = "eth-rpc-py" },
- { name = "eth-typeshed-py" },
- { name = "eth-typing" },
- { name = "fastapi" },
- { name = "google-api-python-client" },
- { name = "instructor" },
- { name = "ipfshttpclient" },
- { name = "langchain" },
- { name = "langchain-community" },
- { name = "langchain-openai" },
- { name = "langgraph" },
- { name = "langmem" },
- { name = "langsmith" },
- { name = "numerize" },
- { name = "numpy" },
- { name = "pandas" },
- { name = "pinata-python" },
- { name = "pydantic" },
- { name = "pygithub" },
- { name = "pypdf" },
- { name = "requests" },
- { name = "textblob" },
- { name = "tiktoken" },
- { name = "tweepy" },
- { name = "typer" },
- { name = "uvicorn" },
-]
-
-[package.optional-dependencies]
-dev = [
- { name = "isort" },
- { name = "mypy" },
- { name = "pytest" },
- { name = "pytest-mock" },
- { name = "ruff" },
-]
-
-[package.metadata]
-requires-dist = [
- { name = "alembic", specifier = "==1.14.0" },
- { name = "apscheduler", specifier = "==3.10.4" },
- { name = "beautifulsoup4", specifier = "==4.13.4" },
- { name = "duckduckgo-search", specifier = "==8.1.1" },
- { name = "eth-rpc-py", specifier = "==0.1.34" },
- { name = "eth-typeshed-py", specifier = ">=0.1.34" },
- { name = "eth-typing", specifier = "==5.2.1" },
- { name = "fastapi", specifier = "==0.115.6" },
- { name = "google-api-python-client", specifier = ">=2.176.0" },
- { name = "instructor", specifier = "==1.10.0" },
- { name = "ipfshttpclient", specifier = "==0.7.0" },
- { name = "isort", marker = "extra == 'dev'", specifier = "==5.12.0" },
- { name = "langchain", specifier = "==0.3.26" },
- { name = "langchain-community", specifier = "==0.3.27" },
- { name = "langchain-openai", specifier = "==0.3.28" },
- { name = "langgraph", specifier = ">=0.2.0" },
- { name = "langmem", specifier = ">=0.0.29" },
- { name = "langsmith", specifier = ">=0.1.0" },
- { name = "mypy", marker = "extra == 'dev'", specifier = "==1.17.0" },
- { name = "numerize", specifier = ">=0.12" },
- { name = "numpy", specifier = ">=2.3.3" },
- { name = "pandas", specifier = ">=2.3.2" },
- { name = "pinata-python", specifier = "==1.0.0" },
- { name = "pydantic", specifier = "==2.11.7" },
- { name = "pygithub", specifier = "==2.6.1" },
- { name = "pypdf", specifier = "==5.8.0" },
- { name = "pytest", marker = "extra == 'dev'", specifier = "==8.4.1" },
- { name = "pytest-mock", marker = "extra == 'dev'", specifier = "==3.14.1" },
- { name = "requests", specifier = "==2.32.4" },
- { name = "ruff", marker = "extra == 'dev'", specifier = "==0.12.4" },
- { name = "textblob", specifier = "==0.19.0" },
- { name = "tiktoken", specifier = "==0.9.0" },
- { name = "tweepy", specifier = "==4.16.0" },
- { name = "typer", specifier = "==0.12.5" },
- { name = "uvicorn", specifier = "==0.32.1" },
-]
-provides-extras = ["dev"]
-
-[[package]]
-name = "tenacity"
-version = "9.1.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
-]
-
-[[package]]
-name = "textblob"
-version = "0.19.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "nltk" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/63/a1/31fc6a5e9e46f2d84f72f12048588feac5464486e526dbfcc4719569cd3e/textblob-0.19.0.tar.gz", hash = "sha256:0a3d06a47cf7759441da3418c4843aed3797a998beba2108c6245a2020f83b01", size = 637872, upload-time = "2025-01-13T23:03:07.352Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1e/d6/40aa5aead775582ea0cf35870e5a3f16fab4b967f1ad2debe675f673f923/textblob-0.19.0-py3-none-any.whl", hash = "sha256:af6b8827886f1ee839a625f4865e5abb1584eae8db2259627b33a6a0b02ef19d", size = 624280, upload-time = "2025-01-13T23:03:01.034Z" },
-]
-
-[[package]]
-name = "tiktoken"
-version = "0.9.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "regex" },
- { name = "requests" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ea/cf/756fedf6981e82897f2d570dd25fa597eb3f4459068ae0572d7e888cfd6f/tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d", size = 35991, upload-time = "2025-02-14T06:03:01.003Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/cf/e5/21ff33ecfa2101c1bb0f9b6df750553bd873b7fb532ce2cb276ff40b197f/tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03", size = 1065073, upload-time = "2025-02-14T06:02:24.768Z" },
- { url = "https://files.pythonhosted.org/packages/8e/03/a95e7b4863ee9ceec1c55983e4cc9558bcfd8f4f80e19c4f8a99642f697d/tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210", size = 1008075, upload-time = "2025-02-14T06:02:26.92Z" },
- { url = "https://files.pythonhosted.org/packages/40/10/1305bb02a561595088235a513ec73e50b32e74364fef4de519da69bc8010/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794", size = 1140754, upload-time = "2025-02-14T06:02:28.124Z" },
- { url = "https://files.pythonhosted.org/packages/1b/40/da42522018ca496432ffd02793c3a72a739ac04c3794a4914570c9bb2925/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22", size = 1196678, upload-time = "2025-02-14T06:02:29.845Z" },
- { url = "https://files.pythonhosted.org/packages/5c/41/1e59dddaae270ba20187ceb8aa52c75b24ffc09f547233991d5fd822838b/tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2", size = 1259283, upload-time = "2025-02-14T06:02:33.838Z" },
- { url = "https://files.pythonhosted.org/packages/5b/64/b16003419a1d7728d0d8c0d56a4c24325e7b10a21a9dd1fc0f7115c02f0a/tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16", size = 894897, upload-time = "2025-02-14T06:02:36.265Z" },
- { url = "https://files.pythonhosted.org/packages/7a/11/09d936d37f49f4f494ffe660af44acd2d99eb2429d60a57c71318af214e0/tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb", size = 1064919, upload-time = "2025-02-14T06:02:37.494Z" },
- { url = "https://files.pythonhosted.org/packages/80/0e/f38ba35713edb8d4197ae602e80837d574244ced7fb1b6070b31c29816e0/tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63", size = 1007877, upload-time = "2025-02-14T06:02:39.516Z" },
- { url = "https://files.pythonhosted.org/packages/fe/82/9197f77421e2a01373e27a79dd36efdd99e6b4115746ecc553318ecafbf0/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01", size = 1140095, upload-time = "2025-02-14T06:02:41.791Z" },
- { url = "https://files.pythonhosted.org/packages/f2/bb/4513da71cac187383541facd0291c4572b03ec23c561de5811781bbd988f/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139", size = 1195649, upload-time = "2025-02-14T06:02:43Z" },
- { url = "https://files.pythonhosted.org/packages/fa/5c/74e4c137530dd8504e97e3a41729b1103a4ac29036cbfd3250b11fd29451/tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a", size = 1258465, upload-time = "2025-02-14T06:02:45.046Z" },
- { url = "https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669, upload-time = "2025-02-14T06:02:47.341Z" },
-]
-
-[[package]]
-name = "toolz"
-version = "1.0.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/8a/0b/d80dfa675bf592f636d1ea0b835eab4ec8df6e9415d8cfd766df54456123/toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02", size = 66790, upload-time = "2024-10-04T16:17:04.001Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/03/98/eb27cc78ad3af8e302c9d8ff4977f5026676e130d28dd7578132a457170c/toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236", size = 56383, upload-time = "2024-10-04T16:17:01.533Z" },
-]
-
-[[package]]
-name = "tqdm"
-version = "4.67.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
-]
-
-[[package]]
-name = "trustcall"
-version = "0.0.39"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "dydantic" },
- { name = "jsonpatch" },
- { name = "langgraph" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/2b/72/4cdb54a31952827e8b58e11ea286bbfe2d3aa0ffb77a2f87dbc1c7ea77d3/trustcall-0.0.39.tar.gz", hash = "sha256:ec315818224501b9537ce6b7618dbc21be41210c6e8f2e239169a5a00912cd6e", size = 38637, upload-time = "2025-04-14T22:02:50.857Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2c/3a/58de925a104ce554fc250b833fe76401c7822aa8d65f2002cb53195e6c64/trustcall-0.0.39-py3-none-any.whl", hash = "sha256:d7da42e0bba816c0539b2936dfed90ffb3ea8d789e548e73865d416f8ac4ee64", size = 30073, upload-time = "2025-04-14T22:02:49.402Z" },
-]
-
-[[package]]
-name = "tweepy"
-version = "4.16.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "oauthlib" },
- { name = "requests" },
- { name = "requests-oauthlib" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/6e/45/a73bb812b1817247d3f79b3b9a4784ab93a081853b697e87428caa8c287b/tweepy-4.16.0.tar.gz", hash = "sha256:1d95cbdc50bf6353a387f881f2584eaf60d14e00dbbdd8872a73de79c66878e3", size = 87646, upload-time = "2025-06-22T01:17:51.34Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ee/7c/3375cd1fbefcb8ead580fe324b1b6dcdc21aabf51562ee6def7266fcf363/tweepy-4.16.0-py3-none-any.whl", hash = "sha256:48d1a1eb311d2c4b8990abcfa6f9fa2b2ad61be05c723b1a9b4f242656badae2", size = 98843, upload-time = "2025-06-22T01:17:49.823Z" },
-]
-
-[[package]]
-name = "typer"
-version = "0.12.5"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "rich" },
- { name = "shellingham" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c5/58/a79003b91ac2c6890fc5d90145c662fd5771c6f11447f116b63300436bc9/typer-0.12.5.tar.gz", hash = "sha256:f592f089bedcc8ec1b974125d64851029c3b1af145f04aca64d69410f0c9b722", size = 98953, upload-time = "2024-08-24T21:17:57.346Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a8/2b/886d13e742e514f704c33c4caa7df0f3b89e5a25ef8db02aa9ca3d9535d5/typer-0.12.5-py3-none-any.whl", hash = "sha256:62fe4e471711b147e3365034133904df3e235698399bc4de2b36c8579298d52b", size = 47288, upload-time = "2024-08-24T21:17:55.451Z" },
-]
-
-[[package]]
-name = "typing-extensions"
-version = "4.15.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
-]
-
-[[package]]
-name = "typing-inspect"
-version = "0.9.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "mypy-extensions" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825, upload-time = "2023-05-24T20:25:47.612Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827, upload-time = "2023-05-24T20:25:45.287Z" },
-]
-
-[[package]]
-name = "typing-inspection"
-version = "0.4.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" },
-]
-
-[[package]]
-name = "tzdata"
-version = "2025.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" },
-]
-
-[[package]]
-name = "tzlocal"
-version = "5.3.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "tzdata", marker = "sys_platform == 'win32'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" },
-]
-
-[[package]]
-name = "uritemplate"
-version = "4.2.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/98/60/f174043244c5306c9988380d2cb10009f91563fc4b31293d27e17201af56/uritemplate-4.2.0.tar.gz", hash = "sha256:480c2ed180878955863323eea31b0ede668795de182617fef9c6ca09e6ec9d0e", size = 33267, upload-time = "2025-06-02T15:12:06.318Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a9/99/3ae339466c9183ea5b8ae87b34c0b897eda475d2aec2307cae60e5cd4f29/uritemplate-4.2.0-py3-none-any.whl", hash = "sha256:962201ba1c4edcab02e60f9a0d3821e82dfc5d2d6662a21abd533879bdb8a686", size = 11488, upload-time = "2025-06-02T15:12:03.405Z" },
-]
-
-[[package]]
-name = "urllib3"
-version = "2.5.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
-]
-
-[[package]]
-name = "uvicorn"
-version = "0.32.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "h11" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/6a/3c/21dba3e7d76138725ef307e3d7ddd29b763119b3aa459d02cc05fefcff75/uvicorn-0.32.1.tar.gz", hash = "sha256:ee9519c246a72b1c084cea8d3b44ed6026e78a4a309cbedae9c37e4cb9fbb175", size = 77630, upload-time = "2024-11-20T19:41:13.341Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/50/c1/2d27b0a15826c2b71dcf6e2f5402181ef85acf439617bb2f1453125ce1f3/uvicorn-0.32.1-py3-none-any.whl", hash = "sha256:82ad92fd58da0d12af7482ecdb5f2470a04c9c9a53ced65b9bbb4a205377602e", size = 63828, upload-time = "2024-11-20T19:41:11.244Z" },
-]
-
-[[package]]
-name = "varint"
-version = "1.0.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a8/fe/1ea0ba0896dfa47186692655b86db3214c4b7c9e0e76c7b1dc257d101ab1/varint-1.0.2.tar.gz", hash = "sha256:a6ecc02377ac5ee9d65a6a8ad45c9ff1dac8ccee19400a5950fb51d594214ca5", size = 1886, upload-time = "2016-02-24T20:42:38.5Z" }
-
-[[package]]
-name = "websockets"
-version = "15.0.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" },
- { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" },
- { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" },
- { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" },
- { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" },
- { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" },
- { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" },
- { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" },
- { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" },
- { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" },
- { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" },
- { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" },
- { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" },
- { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" },
- { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" },
- { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" },
- { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" },
- { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" },
- { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" },
- { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" },
- { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" },
- { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" },
- { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" },
-]
-
-[[package]]
-name = "wrapt"
-version = "1.17.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/9f/41/cad1aba93e752f1f9268c77270da3c469883d56e2798e7df6240dcb2287b/wrapt-1.17.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0", size = 53998, upload-time = "2025-08-12T05:51:47.138Z" },
- { url = "https://files.pythonhosted.org/packages/60/f8/096a7cc13097a1869fe44efe68dace40d2a16ecb853141394047f0780b96/wrapt-1.17.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba", size = 39020, upload-time = "2025-08-12T05:51:35.906Z" },
- { url = "https://files.pythonhosted.org/packages/33/df/bdf864b8997aab4febb96a9ae5c124f700a5abd9b5e13d2a3214ec4be705/wrapt-1.17.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd", size = 39098, upload-time = "2025-08-12T05:51:57.474Z" },
- { url = "https://files.pythonhosted.org/packages/9f/81/5d931d78d0eb732b95dc3ddaeeb71c8bb572fb01356e9133916cd729ecdd/wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828", size = 88036, upload-time = "2025-08-12T05:52:34.784Z" },
- { url = "https://files.pythonhosted.org/packages/ca/38/2e1785df03b3d72d34fc6252d91d9d12dc27a5c89caef3335a1bbb8908ca/wrapt-1.17.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9", size = 88156, upload-time = "2025-08-12T05:52:13.599Z" },
- { url = "https://files.pythonhosted.org/packages/b3/8b/48cdb60fe0603e34e05cffda0b2a4adab81fd43718e11111a4b0100fd7c1/wrapt-1.17.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396", size = 87102, upload-time = "2025-08-12T05:52:14.56Z" },
- { url = "https://files.pythonhosted.org/packages/3c/51/d81abca783b58f40a154f1b2c56db1d2d9e0d04fa2d4224e357529f57a57/wrapt-1.17.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc", size = 87732, upload-time = "2025-08-12T05:52:36.165Z" },
- { url = "https://files.pythonhosted.org/packages/9e/b1/43b286ca1392a006d5336412d41663eeef1ad57485f3e52c767376ba7e5a/wrapt-1.17.3-cp312-cp312-win32.whl", hash = "sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe", size = 36705, upload-time = "2025-08-12T05:53:07.123Z" },
- { url = "https://files.pythonhosted.org/packages/28/de/49493f962bd3c586ab4b88066e967aa2e0703d6ef2c43aa28cb83bf7b507/wrapt-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c", size = 38877, upload-time = "2025-08-12T05:53:05.436Z" },
- { url = "https://files.pythonhosted.org/packages/f1/48/0f7102fe9cb1e8a5a77f80d4f0956d62d97034bbe88d33e94699f99d181d/wrapt-1.17.3-cp312-cp312-win_arm64.whl", hash = "sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6", size = 36885, upload-time = "2025-08-12T05:52:54.367Z" },
- { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" },
- { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" },
- { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" },
- { url = "https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" },
- { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" },
- { url = "https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" },
- { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" },
- { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = "2025-08-12T05:53:10.074Z" },
- { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" },
- { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" },
- { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" },
- { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" },
- { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" },
- { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" },
- { url = "https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" },
- { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" },
- { url = "https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" },
- { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" },
- { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" },
- { url = "https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" },
- { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" },
- { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" },
- { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" },
- { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = "2025-08-12T05:52:44.521Z" },
- { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" },
- { url = "https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" },
- { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" },
- { url = "https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" },
- { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" },
- { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" },
- { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" },
-]
-
-[[package]]
-name = "xxhash"
-version = "3.5.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/00/5e/d6e5258d69df8b4ed8c83b6664f2b47d30d2dec551a29ad72a6c69eafd31/xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f", size = 84241, upload-time = "2024-08-17T09:20:38.972Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/07/0e/1bfce2502c57d7e2e787600b31c83535af83746885aa1a5f153d8c8059d6/xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00", size = 31969, upload-time = "2024-08-17T09:18:24.025Z" },
- { url = "https://files.pythonhosted.org/packages/3f/d6/8ca450d6fe5b71ce521b4e5db69622383d039e2b253e9b2f24f93265b52c/xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9", size = 30787, upload-time = "2024-08-17T09:18:25.318Z" },
- { url = "https://files.pythonhosted.org/packages/5b/84/de7c89bc6ef63d750159086a6ada6416cc4349eab23f76ab870407178b93/xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84", size = 220959, upload-time = "2024-08-17T09:18:26.518Z" },
- { url = "https://files.pythonhosted.org/packages/fe/86/51258d3e8a8545ff26468c977101964c14d56a8a37f5835bc0082426c672/xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793", size = 200006, upload-time = "2024-08-17T09:18:27.905Z" },
- { url = "https://files.pythonhosted.org/packages/02/0a/96973bd325412feccf23cf3680fd2246aebf4b789122f938d5557c54a6b2/xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be", size = 428326, upload-time = "2024-08-17T09:18:29.335Z" },
- { url = "https://files.pythonhosted.org/packages/11/a7/81dba5010f7e733de88af9555725146fc133be97ce36533867f4c7e75066/xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6", size = 194380, upload-time = "2024-08-17T09:18:30.706Z" },
- { url = "https://files.pythonhosted.org/packages/fb/7d/f29006ab398a173f4501c0e4977ba288f1c621d878ec217b4ff516810c04/xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90", size = 207934, upload-time = "2024-08-17T09:18:32.133Z" },
- { url = "https://files.pythonhosted.org/packages/8a/6e/6e88b8f24612510e73d4d70d9b0c7dff62a2e78451b9f0d042a5462c8d03/xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27", size = 216301, upload-time = "2024-08-17T09:18:33.474Z" },
- { url = "https://files.pythonhosted.org/packages/af/51/7862f4fa4b75a25c3b4163c8a873f070532fe5f2d3f9b3fc869c8337a398/xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2", size = 203351, upload-time = "2024-08-17T09:18:34.889Z" },
- { url = "https://files.pythonhosted.org/packages/22/61/8d6a40f288f791cf79ed5bb113159abf0c81d6efb86e734334f698eb4c59/xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d", size = 210294, upload-time = "2024-08-17T09:18:36.355Z" },
- { url = "https://files.pythonhosted.org/packages/17/02/215c4698955762d45a8158117190261b2dbefe9ae7e5b906768c09d8bc74/xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab", size = 414674, upload-time = "2024-08-17T09:18:38.536Z" },
- { url = "https://files.pythonhosted.org/packages/31/5c/b7a8db8a3237cff3d535261325d95de509f6a8ae439a5a7a4ffcff478189/xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e", size = 192022, upload-time = "2024-08-17T09:18:40.138Z" },
- { url = "https://files.pythonhosted.org/packages/78/e3/dd76659b2811b3fd06892a8beb850e1996b63e9235af5a86ea348f053e9e/xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8", size = 30170, upload-time = "2024-08-17T09:18:42.163Z" },
- { url = "https://files.pythonhosted.org/packages/d9/6b/1c443fe6cfeb4ad1dcf231cdec96eb94fb43d6498b4469ed8b51f8b59a37/xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e", size = 30040, upload-time = "2024-08-17T09:18:43.699Z" },
- { url = "https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796, upload-time = "2024-08-17T09:18:45.29Z" },
- { url = "https://files.pythonhosted.org/packages/c9/b8/e4b3ad92d249be5c83fa72916c9091b0965cb0faeff05d9a0a3870ae6bff/xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6", size = 31795, upload-time = "2024-08-17T09:18:46.813Z" },
- { url = "https://files.pythonhosted.org/packages/fc/d8/b3627a0aebfbfa4c12a41e22af3742cf08c8ea84f5cc3367b5de2d039cce/xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5", size = 30792, upload-time = "2024-08-17T09:18:47.862Z" },
- { url = "https://files.pythonhosted.org/packages/c3/cc/762312960691da989c7cd0545cb120ba2a4148741c6ba458aa723c00a3f8/xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc", size = 220950, upload-time = "2024-08-17T09:18:49.06Z" },
- { url = "https://files.pythonhosted.org/packages/fe/e9/cc266f1042c3c13750e86a535496b58beb12bf8c50a915c336136f6168dc/xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3", size = 199980, upload-time = "2024-08-17T09:18:50.445Z" },
- { url = "https://files.pythonhosted.org/packages/bf/85/a836cd0dc5cc20376de26b346858d0ac9656f8f730998ca4324921a010b9/xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c", size = 428324, upload-time = "2024-08-17T09:18:51.988Z" },
- { url = "https://files.pythonhosted.org/packages/b4/0e/15c243775342ce840b9ba34aceace06a1148fa1630cd8ca269e3223987f5/xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb", size = 194370, upload-time = "2024-08-17T09:18:54.164Z" },
- { url = "https://files.pythonhosted.org/packages/87/a1/b028bb02636dfdc190da01951d0703b3d904301ed0ef6094d948983bef0e/xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f", size = 207911, upload-time = "2024-08-17T09:18:55.509Z" },
- { url = "https://files.pythonhosted.org/packages/80/d5/73c73b03fc0ac73dacf069fdf6036c9abad82de0a47549e9912c955ab449/xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7", size = 216352, upload-time = "2024-08-17T09:18:57.073Z" },
- { url = "https://files.pythonhosted.org/packages/b6/2a/5043dba5ddbe35b4fe6ea0a111280ad9c3d4ba477dd0f2d1fe1129bda9d0/xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326", size = 203410, upload-time = "2024-08-17T09:18:58.54Z" },
- { url = "https://files.pythonhosted.org/packages/a2/b2/9a8ded888b7b190aed75b484eb5c853ddd48aa2896e7b59bbfbce442f0a1/xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf", size = 210322, upload-time = "2024-08-17T09:18:59.943Z" },
- { url = "https://files.pythonhosted.org/packages/98/62/440083fafbc917bf3e4b67c2ade621920dd905517e85631c10aac955c1d2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7", size = 414725, upload-time = "2024-08-17T09:19:01.332Z" },
- { url = "https://files.pythonhosted.org/packages/75/db/009206f7076ad60a517e016bb0058381d96a007ce3f79fa91d3010f49cc2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c", size = 192070, upload-time = "2024-08-17T09:19:03.007Z" },
- { url = "https://files.pythonhosted.org/packages/1f/6d/c61e0668943a034abc3a569cdc5aeae37d686d9da7e39cf2ed621d533e36/xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637", size = 30172, upload-time = "2024-08-17T09:19:04.355Z" },
- { url = "https://files.pythonhosted.org/packages/96/14/8416dce965f35e3d24722cdf79361ae154fa23e2ab730e5323aa98d7919e/xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43", size = 30041, upload-time = "2024-08-17T09:19:05.435Z" },
- { url = "https://files.pythonhosted.org/packages/27/ee/518b72faa2073f5aa8e3262408d284892cb79cf2754ba0c3a5870645ef73/xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b", size = 26801, upload-time = "2024-08-17T09:19:06.547Z" },
-]
-
-[[package]]
-name = "yarl"
-version = "1.20.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "idna" },
- { name = "multidict" },
- { name = "propcache" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" },
- { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" },
- { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" },
- { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, upload-time = "2025-06-10T00:43:49.924Z" },
- { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" },
- { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" },
- { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" },
- { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" },
- { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" },
- { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" },
- { url = "https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" },
- { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" },
- { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" },
- { url = "https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 383378, upload-time = "2025-06-10T00:44:10.51Z" },
- { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" },
- { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" },
- { url = "https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", size = 86710, upload-time = "2025-06-10T00:44:16.716Z" },
- { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" },
- { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" },
- { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" },
- { url = "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" },
- { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" },
- { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" },
- { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" },
- { url = "https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" },
- { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" },
- { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" },
- { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" },
- { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" },
- { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" },
- { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" },
- { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" },
- { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" },
- { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" },
- { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" },
- { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" },
- { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" },
- { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" },
- { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" },
- { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" },
- { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" },
- { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" },
- { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" },
- { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" },
- { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" },
- { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" },
- { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" },
- { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" },
- { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" },
- { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" },
- { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" },
- { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" },
-]
-
-[[package]]
-name = "zstandard"
-version = "0.24.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/09/1b/c20b2ef1d987627765dcd5bf1dadb8ef6564f00a87972635099bb76b7a05/zstandard-0.24.0.tar.gz", hash = "sha256:fe3198b81c00032326342d973e526803f183f97aa9e9a98e3f897ebafe21178f", size = 905681, upload-time = "2025-08-17T18:36:36.352Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/26/e9/0bd281d9154bba7fc421a291e263911e1d69d6951aa80955b992a48289f6/zstandard-0.24.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a2bda8f2790add22773ee7a4e43c90ea05598bffc94c21c40ae0a9000b0133c3", size = 795710, upload-time = "2025-08-17T18:22:19.189Z" },
- { url = "https://files.pythonhosted.org/packages/36/26/b250a2eef515caf492e2d86732e75240cdac9d92b04383722b9753590c36/zstandard-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cc76de75300f65b8eb574d855c12518dc25a075dadb41dd18f6322bda3fe15d5", size = 640336, upload-time = "2025-08-17T18:22:20.466Z" },
- { url = "https://files.pythonhosted.org/packages/79/bf/3ba6b522306d9bf097aac8547556b98a4f753dc807a170becaf30dcd6f01/zstandard-0.24.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:d2b3b4bda1a025b10fe0269369475f420177f2cb06e0f9d32c95b4873c9f80b8", size = 5342533, upload-time = "2025-08-17T18:22:22.326Z" },
- { url = "https://files.pythonhosted.org/packages/ea/ec/22bc75bf054e25accdf8e928bc68ab36b4466809729c554ff3a1c1c8bce6/zstandard-0.24.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b84c6c210684286e504022d11ec294d2b7922d66c823e87575d8b23eba7c81f", size = 5062837, upload-time = "2025-08-17T18:22:24.416Z" },
- { url = "https://files.pythonhosted.org/packages/48/cc/33edfc9d286e517fb5b51d9c3210e5bcfce578d02a675f994308ca587ae1/zstandard-0.24.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c59740682a686bf835a1a4d8d0ed1eefe31ac07f1c5a7ed5f2e72cf577692b00", size = 5393855, upload-time = "2025-08-17T18:22:26.786Z" },
- { url = "https://files.pythonhosted.org/packages/73/36/59254e9b29da6215fb3a717812bf87192d89f190f23817d88cb8868c47ac/zstandard-0.24.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:6324fde5cf5120fbf6541d5ff3c86011ec056e8d0f915d8e7822926a5377193a", size = 5451058, upload-time = "2025-08-17T18:22:28.885Z" },
- { url = "https://files.pythonhosted.org/packages/9a/c7/31674cb2168b741bbbe71ce37dd397c9c671e73349d88ad3bca9e9fae25b/zstandard-0.24.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:51a86bd963de3f36688553926a84e550d45d7f9745bd1947d79472eca27fcc75", size = 5546619, upload-time = "2025-08-17T18:22:31.115Z" },
- { url = "https://files.pythonhosted.org/packages/e6/01/1a9f22239f08c00c156f2266db857545ece66a6fc0303d45c298564bc20b/zstandard-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d82ac87017b734f2fb70ff93818c66f0ad2c3810f61040f077ed38d924e19980", size = 5046676, upload-time = "2025-08-17T18:22:33.077Z" },
- { url = "https://files.pythonhosted.org/packages/a7/91/6c0cf8fa143a4988a0361380ac2ef0d7cb98a374704b389fbc38b5891712/zstandard-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92ea7855d5bcfb386c34557516c73753435fb2d4a014e2c9343b5f5ba148b5d8", size = 5576381, upload-time = "2025-08-17T18:22:35.391Z" },
- { url = "https://files.pythonhosted.org/packages/e2/77/1526080e22e78871e786ccf3c84bf5cec9ed25110a9585507d3c551da3d6/zstandard-0.24.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3adb4b5414febf074800d264ddf69ecade8c658837a83a19e8ab820e924c9933", size = 4953403, upload-time = "2025-08-17T18:22:37.266Z" },
- { url = "https://files.pythonhosted.org/packages/6e/d0/a3a833930bff01eab697eb8abeafb0ab068438771fa066558d96d7dafbf9/zstandard-0.24.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6374feaf347e6b83ec13cc5dcfa70076f06d8f7ecd46cc71d58fac798ff08b76", size = 5267396, upload-time = "2025-08-17T18:22:39.757Z" },
- { url = "https://files.pythonhosted.org/packages/f3/5e/90a0db9a61cd4769c06374297ecfcbbf66654f74cec89392519deba64d76/zstandard-0.24.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:13fc548e214df08d896ee5f29e1f91ee35db14f733fef8eabea8dca6e451d1e2", size = 5433269, upload-time = "2025-08-17T18:22:42.131Z" },
- { url = "https://files.pythonhosted.org/packages/ce/58/fc6a71060dd67c26a9c5566e0d7c99248cbe5abfda6b3b65b8f1a28d59f7/zstandard-0.24.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0a416814608610abf5488889c74e43ffa0343ca6cf43957c6b6ec526212422da", size = 5814203, upload-time = "2025-08-17T18:22:44.017Z" },
- { url = "https://files.pythonhosted.org/packages/5c/6a/89573d4393e3ecbfa425d9a4e391027f58d7810dec5cdb13a26e4cdeef5c/zstandard-0.24.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0d66da2649bb0af4471699aeb7a83d6f59ae30236fb9f6b5d20fb618ef6c6777", size = 5359622, upload-time = "2025-08-17T18:22:45.802Z" },
- { url = "https://files.pythonhosted.org/packages/60/ff/2cbab815d6f02a53a9d8d8703bc727d8408a2e508143ca9af6c3cca2054b/zstandard-0.24.0-cp312-cp312-win32.whl", hash = "sha256:ff19efaa33e7f136fe95f9bbcc90ab7fb60648453b03f95d1de3ab6997de0f32", size = 435968, upload-time = "2025-08-17T18:22:49.493Z" },
- { url = "https://files.pythonhosted.org/packages/ce/a3/8f96b8ddb7ad12344218fbd0fd2805702dafd126ae9f8a1fb91eef7b33da/zstandard-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc05f8a875eb651d1cc62e12a4a0e6afa5cd0cc231381adb830d2e9c196ea895", size = 505195, upload-time = "2025-08-17T18:22:47.193Z" },
- { url = "https://files.pythonhosted.org/packages/a3/4a/bfca20679da63bfc236634ef2e4b1b4254203098b0170e3511fee781351f/zstandard-0.24.0-cp312-cp312-win_arm64.whl", hash = "sha256:b04c94718f7a8ed7cdd01b162b6caa1954b3c9d486f00ecbbd300f149d2b2606", size = 461605, upload-time = "2025-08-17T18:22:48.317Z" },
- { url = "https://files.pythonhosted.org/packages/ec/ef/db949de3bf81ed122b8ee4db6a8d147a136fe070e1015f5a60d8a3966748/zstandard-0.24.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e4ebb000c0fe24a6d0f3534b6256844d9dbf042fdf003efe5cf40690cf4e0f3e", size = 795700, upload-time = "2025-08-17T18:22:50.851Z" },
- { url = "https://files.pythonhosted.org/packages/99/56/fc04395d6f5eabd2fe6d86c0800d198969f3038385cb918bfbe94f2b0c62/zstandard-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:498f88f5109666c19531f0243a90d2fdd2252839cd6c8cc6e9213a3446670fa8", size = 640343, upload-time = "2025-08-17T18:22:51.999Z" },
- { url = "https://files.pythonhosted.org/packages/9b/0f/0b0e0d55f2f051d5117a0d62f4f9a8741b3647440c0ee1806b7bd47ed5ae/zstandard-0.24.0-cp313-cp313-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:0a9e95ceb180ccd12a8b3437bac7e8a8a089c9094e39522900a8917745542184", size = 5342571, upload-time = "2025-08-17T18:22:53.734Z" },
- { url = "https://files.pythonhosted.org/packages/5d/43/d74e49f04fbd62d4b5d89aeb7a29d693fc637c60238f820cd5afe6ca8180/zstandard-0.24.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bcf69e0bcddbf2adcfafc1a7e864edcc204dd8171756d3a8f3340f6f6cc87b7b", size = 5062723, upload-time = "2025-08-17T18:22:55.624Z" },
- { url = "https://files.pythonhosted.org/packages/8e/97/df14384d4d6a004388e6ed07ded02933b5c7e0833a9150c57d0abc9545b7/zstandard-0.24.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:10e284748a7e7fbe2815ca62a9d6e84497d34cfdd0143fa9e8e208efa808d7c4", size = 5393282, upload-time = "2025-08-17T18:22:57.655Z" },
- { url = "https://files.pythonhosted.org/packages/7e/09/8f5c520e59a4d41591b30b7568595eda6fd71c08701bb316d15b7ed0613a/zstandard-0.24.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:1bda8a85e5b9d5e73af2e61b23609a8cc1598c1b3b2473969912979205a1ff25", size = 5450895, upload-time = "2025-08-17T18:22:59.749Z" },
- { url = "https://files.pythonhosted.org/packages/d9/3d/02aba892327a67ead8cba160ee835cfa1fc292a9dcb763639e30c07da58b/zstandard-0.24.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1b14bc92af065d0534856bf1b30fc48753163ea673da98857ea4932be62079b1", size = 5546353, upload-time = "2025-08-17T18:23:01.457Z" },
- { url = "https://files.pythonhosted.org/packages/6a/6e/96c52afcde44da6a5313a1f6c356349792079808f12d8b69a7d1d98ef353/zstandard-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:b4f20417a4f511c656762b001ec827500cbee54d1810253c6ca2df2c0a307a5f", size = 5046404, upload-time = "2025-08-17T18:23:03.418Z" },
- { url = "https://files.pythonhosted.org/packages/da/b6/eefee6b92d341a7db7cd1b3885d42d30476a093720fb5c181e35b236d695/zstandard-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:337572a7340e1d92fd7fb5248c8300d0e91071002d92e0b8cabe8d9ae7b58159", size = 5576095, upload-time = "2025-08-17T18:23:05.331Z" },
- { url = "https://files.pythonhosted.org/packages/a3/29/743de3131f6239ba6611e17199581e6b5e0f03f268924d42468e29468ca0/zstandard-0.24.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:df4be1cf6e8f0f2bbe2a3eabfff163ef592c84a40e1a20a8d7db7f27cfe08fc2", size = 4953448, upload-time = "2025-08-17T18:23:07.225Z" },
- { url = "https://files.pythonhosted.org/packages/c9/11/bd36ef49fba82e307d69d93b5abbdcdc47d6a0bcbc7ffbbfe0ef74c2fec5/zstandard-0.24.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6885ae4b33aee8835dbdb4249d3dfec09af55e705d74d9b660bfb9da51baaa8b", size = 5267388, upload-time = "2025-08-17T18:23:09.127Z" },
- { url = "https://files.pythonhosted.org/packages/c0/23/a4cfe1b871d3f1ce1f88f5c68d7e922e94be0043f3ae5ed58c11578d1e21/zstandard-0.24.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:663848a8bac4fdbba27feea2926049fdf7b55ec545d5b9aea096ef21e7f0b079", size = 5433383, upload-time = "2025-08-17T18:23:11.343Z" },
- { url = "https://files.pythonhosted.org/packages/77/26/f3fb85f00e732cca617d4b9cd1ffa6484f613ea07fad872a8bdc3a0ce753/zstandard-0.24.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:05d27c953f2e0a3ecc8edbe91d6827736acc4c04d0479672e0400ccdb23d818c", size = 5813988, upload-time = "2025-08-17T18:23:13.194Z" },
- { url = "https://files.pythonhosted.org/packages/3d/8c/d7e3b424b73f3ce66e754595cbcb6d94ff49790c9ac37d50e40e8145cd44/zstandard-0.24.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:77b8b7b98893eaf47da03d262816f01f251c2aa059c063ed8a45c50eada123a5", size = 5359756, upload-time = "2025-08-17T18:23:15.021Z" },
- { url = "https://files.pythonhosted.org/packages/90/6c/f1f0e11f1b295138f9da7e7ae22dcd9a1bb96a9544fa3b31507e431288f5/zstandard-0.24.0-cp313-cp313-win32.whl", hash = "sha256:cf7fbb4e54136e9a03c7ed7691843c4df6d2ecc854a2541f840665f4f2bb2edd", size = 435957, upload-time = "2025-08-17T18:23:18.835Z" },
- { url = "https://files.pythonhosted.org/packages/9f/03/ab8b82ae5eb49eca4d3662705399c44442666cc1ce45f44f2d263bb1ae31/zstandard-0.24.0-cp313-cp313-win_amd64.whl", hash = "sha256:d64899cc0f33a8f446f1e60bffc21fa88b99f0e8208750d9144ea717610a80ce", size = 505171, upload-time = "2025-08-17T18:23:16.44Z" },
- { url = "https://files.pythonhosted.org/packages/db/12/89a2ecdea4bc73a934a30b66a7cfac5af352beac94d46cf289e103b65c34/zstandard-0.24.0-cp313-cp313-win_arm64.whl", hash = "sha256:57be3abb4313e0dd625596376bbb607f40059d801d51c1a1da94d7477e63b255", size = 461596, upload-time = "2025-08-17T18:23:17.603Z" },
- { url = "https://files.pythonhosted.org/packages/c9/56/f3d2c4d64aacee4aab89e788783636884786b6f8334c819f09bff1aa207b/zstandard-0.24.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b7fa260dd2731afd0dfa47881c30239f422d00faee4b8b341d3e597cface1483", size = 795747, upload-time = "2025-08-17T18:23:19.968Z" },
- { url = "https://files.pythonhosted.org/packages/32/2d/9d3e5f6627e4cb5e511803788be1feee2f0c3b94594591e92b81db324253/zstandard-0.24.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e05d66239d14a04b4717998b736a25494372b1b2409339b04bf42aa4663bf251", size = 640475, upload-time = "2025-08-17T18:23:21.5Z" },
- { url = "https://files.pythonhosted.org/packages/be/5d/48e66abf8c146d95330e5385633a8cfdd556fa8bd14856fe721590cbab2b/zstandard-0.24.0-cp314-cp314-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:622e1e04bd8a085994e02313ba06fbcf4f9ed9a488c6a77a8dbc0692abab6a38", size = 5343866, upload-time = "2025-08-17T18:23:23.351Z" },
- { url = "https://files.pythonhosted.org/packages/95/6c/65fe7ba71220a551e082e4a52790487f1d6bb8dfc2156883e088f975ad6d/zstandard-0.24.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:55872e818598319f065e8192ebefecd6ac05f62a43f055ed71884b0a26218f41", size = 5062719, upload-time = "2025-08-17T18:23:25.192Z" },
- { url = "https://files.pythonhosted.org/packages/cb/68/15ed0a813ff91be80cc2a610ac42e0fc8d29daa737de247bbf4bab9429a1/zstandard-0.24.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bb2446a55b3a0fd8aa02aa7194bd64740015464a2daaf160d2025204e1d7c282", size = 5393090, upload-time = "2025-08-17T18:23:27.145Z" },
- { url = "https://files.pythonhosted.org/packages/d4/89/e560427b74fa2da6a12b8f3af8ee29104fe2bb069a25e7d314c35eec7732/zstandard-0.24.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:2825a3951f945fb2613ded0f517d402b1e5a68e87e0ee65f5bd224a8333a9a46", size = 5450383, upload-time = "2025-08-17T18:23:29.044Z" },
- { url = "https://files.pythonhosted.org/packages/a3/95/0498328cbb1693885509f2fc145402b108b750a87a3af65b7250b10bd896/zstandard-0.24.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:09887301001e7a81a3618156bc1759e48588de24bddfdd5b7a4364da9a8fbc20", size = 5546142, upload-time = "2025-08-17T18:23:31.281Z" },
- { url = "https://files.pythonhosted.org/packages/8a/8a/64aa15a726594df3bf5d8decfec14fe20cd788c60890f44fcfc74d98c2cc/zstandard-0.24.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:98ca91dc9602cf351497d5600aa66e6d011a38c085a8237b370433fcb53e3409", size = 4953456, upload-time = "2025-08-17T18:23:33.234Z" },
- { url = "https://files.pythonhosted.org/packages/b0/b6/e94879c5cd6017af57bcba08519ed1228b1ebb15681efd949f4a00199449/zstandard-0.24.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:e69f8e534b4e254f523e2f9d4732cf9c169c327ca1ce0922682aac9a5ee01155", size = 5268287, upload-time = "2025-08-17T18:23:35.145Z" },
- { url = "https://files.pythonhosted.org/packages/fd/e5/1a3b3a93f953dbe9e77e2a19be146e9cd2af31b67b1419d6cc8e8898d409/zstandard-0.24.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:444633b487a711e34f4bccc46a0c5dfbe1aee82c1a511e58cdc16f6bd66f187c", size = 5433197, upload-time = "2025-08-17T18:23:36.969Z" },
- { url = "https://files.pythonhosted.org/packages/39/83/b6eb1e1181de994b29804e1e0d2dc677bece4177f588c71653093cb4f6d5/zstandard-0.24.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f7d3fe9e1483171e9183ffdb1fab07c5fef80a9c3840374a38ec2ab869ebae20", size = 5813161, upload-time = "2025-08-17T18:23:38.812Z" },
- { url = "https://files.pythonhosted.org/packages/f6/d3/2fb4166561591e9d75e8e35c79182aa9456644e2f4536f29e51216d1c513/zstandard-0.24.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:27b6fa72b57824a3f7901fc9cc4ce1c1c834b28f3a43d1d4254c64c8f11149d4", size = 5359831, upload-time = "2025-08-17T18:23:41.162Z" },
- { url = "https://files.pythonhosted.org/packages/11/94/6a9227315b774f64a67445f62152c69b4e5e49a52a3c7c4dad8520a55e20/zstandard-0.24.0-cp314-cp314-win32.whl", hash = "sha256:fdc7a52a4cdaf7293e10813fd6a3abc0c7753660db12a3b864ab1fb5a0c60c16", size = 444448, upload-time = "2025-08-17T18:23:45.151Z" },
- { url = "https://files.pythonhosted.org/packages/fc/de/67acaba311013e0798cb96d1a2685cb6edcdfc1cae378b297ea7b02c319f/zstandard-0.24.0-cp314-cp314-win_amd64.whl", hash = "sha256:656ed895b28c7e42dd5b40dfcea3217cfc166b6b7eef88c3da2f5fc62484035b", size = 516075, upload-time = "2025-08-17T18:23:42.8Z" },
- { url = "https://files.pythonhosted.org/packages/10/ae/45fd8921263cea0228b20aa31bce47cc66016b2aba1afae1c6adcc3dbb1f/zstandard-0.24.0-cp314-cp314-win_arm64.whl", hash = "sha256:0101f835da7de08375f380192ff75135527e46e3f79bef224e3c49cb640fef6a", size = 476847, upload-time = "2025-08-17T18:23:43.892Z" },
-]