From ddcfb459ff866cec58a3ea0255e7172b714eda9c Mon Sep 17 00:00:00 2001
From: Humza Tareen
Date: Mon, 3 Nov 2025 14:07:46 +0500
Subject: [PATCH 1/5] Set up pre-commit workflow with Ruff linting and formatting

- Add comprehensive .pre-commit-config.yaml with Ruff, isort, and file checks
- Update pyproject.toml with enhanced Ruff configuration (lint + format)
- Run Ruff across all files: fixed 1686+ linting issues automatically
- Format codebase with Ruff formatter (24 files reformatted)
- Update .gitignore for pre-commit, Ruff cache, and development tools
- Install pre-commit hooks in git repository

Note: Some non-critical linting warnings remain (49) that can be addressed
incrementally. Pre-commit hooks now enforce code quality on all commits.
---
 .env.example                           |    2 +-
 .gitignore                             |   72 +-
 .pre-commit-config.yaml                |   56 +
 .secrets.baseline                      | 7273 +++++++++++++++++++++++++
 ENVIRONMENT_SETUP.md                   |    1 -
 IMPLEMENTATION_STATUS.md               |    3 +-
 LOGGING_GUIDE.md                       |    1 -
 README.md                              |  160 +-
 alembic.ini                            |    2 +-
 alembic/README                         |    2 +-
 alembic/env.py                         |   13 +-
 app/agents/base.py                     |  111 +-
 app/agents/claude_agent.py             |  417 +-
 app/agents/gemini_agent.py             |  343 +-
 app/agents/iflow_agent.py              |  295 +-
 app/agents/registry.py                 |  125 +-
 app/config.py                          |  109 +-
 app/domain/entities.py                 |  103 +-
 app/infrastructure/database.py         |  202 +-
 app/infrastructure/queue.py            |   58 +-
 app/main.py                            |   77 +-
 app/presentation/middleware.py         |  107 +-
 app/presentation/routers/artifacts.py  |  177 +-
 app/presentation/routers/health.py     |  114 +-
 app/presentation/routers/logs.py       |  280 +-
 app/presentation/routers/tasks.py      |  324 +-
 app/services/judge_service.py          |  493 +-
 app/services/pr_service.py             |  460 +-
 app/services/prompt_service.py         |  352 +-
 app/services/task_logger.py            |  388 +-
 prompts/evaluator_set.txt              |    8 +-
 prompts/precompression.txt             |    4 +-
 pyproject.toml                         |   71 +-
 quick-start.md                         |    2 +-
 scripts/agent_entrypoint.sh            |   42 +-
 scripts/run.sh                         |   56 +-
 scripts/stop.sh                        |   28 +-
 static/index.html                      |  142 +-
 static/logs.html                       |  114 +-
 worker.py                              |   23 +-
 workers/container_tasks.py             |  479 +-
 workers/simple_worker.py               |  853 +--
 workers/tasks.py                       |  367 +-
 43 files changed, 11347 insertions(+), 2962 deletions(-)
 create mode 100644 .pre-commit-config.yaml
 create mode 100644 .secrets.baseline

diff --git a/.env.example b/.env.example
index 46049a5..6b10245 100644
--- a/.env.example
+++ b/.env.example
@@ -62,4 +62,4 @@ JUDGE_MODEL=gpt-4o
 USE_GPT_PROMPTS=true
 PROMPT_MODEL=gpt-4o
 PROMPT_TEMPERATURE=1.0
-PROMPT_MAX_TOKENS=4000
\ No newline at end of file
+PROMPT_MAX_TOKENS=4000
diff --git a/.gitignore b/.gitignore
index 2dc90e1..4f729fb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -230,4 +230,74 @@ test_outputs/
 test_logs/
 storage/
 .cursor/
-!dockerfiles/
\ No newline at end of file
+!dockerfiles/
+
+# Pre-commit
+.pre-commit-cache/
+.pre-commit-repos/
+
+# Ruff
+.ruff_cache/
+ruff.lock
+
+# Bandit
+.bandit
+*.bandit
+
+# Detect-secrets
+# Note: .secrets.baseline should be committed to the repo
+# (it tracks known secrets, not actual secrets)
+
+# Pytest
+.pytest_cache/
+.tox/
+.hypothesis/
+
+# Coverage reports
+.coverage.*
+htmlcov/
+coverage.xml
+*.cover
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# IDE - Additional
+*.sublime-project
+*.sublime-workspace
+.project
+.pydevproject
+.settings/
+*.code-workspace
+
+# Docker
+docker-compose.override.yml
+.dockerignore
+
+# Local configuration
+.env.local
+.env.*.local
+local_settings.py
+
+# Redis dumps
+dump.rdb
+
+# PostgreSQL
+*.pgsql
+pgdata/
+
+# Temporary files
+*.swp
+*.swo
+*~
+*.bak
+*.backup
+
+# PID files
+*.pid
+
+# Lock files (optional - uncomment if you don't want to commit)
+# poetry.lock
+# Pipfile.lock
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..e910e34
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,56 @@
+# Pre-commit hooks configuration
+# See https://pre-commit.com for more information
+# Run `pre-commit install` to install hooks into your git repository
+
+repos:
+  # Ruff: Fast Python linter and formatter
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.8.4
+    hooks:
+      # Run the linter
+      - id: ruff
+        args: [--fix, --exit-non-zero-on-fix]
+      # Run the formatter
+      - id: ruff-format
+
+  # General file checks
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: trailing-whitespace
+        exclude: ^alembic/versions/.*\.py$
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-added-large-files
+        args: [--maxkb=1000]
+      - id: check-json
+      - id: check-toml
+      - id: check-merge-conflict
+      - id: check-case-conflict
+      - id: debug-statements
+      - id: mixed-line-ending
+      - id: requirements-txt-fixer
+      - id: fix-byte-order-marker
+
+  # Python-specific checks
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+        args: ["--profile", "black", "--check-only"]
+
+  # Security checks
+  - repo: https://github.com/PyCQA/bandit
+    rev: 1.7.8
+    hooks:
+      - id: bandit
+        args: ["-r", ".", "-ll"]
+        exclude: ^(tests/|alembic/versions/)
+
+# Check for secrets (disabled for now due to baseline compatibility)
+# - repo: https://github.com/Yelp/detect-secrets
+#   rev: v1.4.0
+#   hooks:
+#     - id: detect-secrets
+#       args: ['--baseline', '.secrets.baseline']
+#       exclude: package.json|pnpm-lock.yaml
diff --git a/.secrets.baseline b/.secrets.baseline
new file mode 100644
index 0000000..22e3288
--- /dev/null
+++ b/.secrets.baseline
@@ -0,0 +1,7273 @@
+{
+  "version": "1.5.0",
+  "plugins_used": [
+    {
+      "name": "ArtifactoryDetector"
+    },
+    {
+      "name": "AWSKeyDetector"
+    },
+    {
+      "name": "AzureStorageKeyDetector"
+    },
+    {
+      "name": "Base64HighEntropyString",
+      "limit": 4.5
+    },
+    {
+      "name": "BasicAuthDetector"
+    },
+    {
+      "name": "CloudantDetector"
+    },
+    {
+      "name": "DiscordBotTokenDetector"
+    },
+    {
+      "name": "GitHubTokenDetector"
+    },
+    {
+      "name": "GitLabTokenDetector"
+    },
+    {
+      "name": "HexHighEntropyString",
+      "limit": 3.0
+    },
+    {
+      "name": "IbmCloudIamDetector"
+    },
+    {
+      "name": "IbmCosHmacDetector"
+    },
+    {
+      "name": "IPPublicDetector"
+    },
+    {
+      "name": "JwtTokenDetector"
+    },
+    {
+      "name": "KeywordDetector",
+      "keyword_exclude": ""
+    },
+    {
+      "name": "MailchimpDetector"
+    },
+    {
+      "name": "NpmDetector"
+    },
+    {
+      "name": "OpenAIDetector"
+    },
+    {
+      "name": "PrivateKeyDetector"
+    },
+    {
+      "name": "PypiTokenDetector"
+    },
+    {
+      "name": "SendGridDetector"
+    },
+    {
+      "name": "SlackDetector"
+    },
+    {
+      "name": "SoftlayerDetector"
+    },
+    {
+      "name": "SquareOAuthDetector"
+    },
+    {
+      "name": "StripeDetector"
+    },
+    {
+      "name": "TelegramBotTokenDetector"
+    },
+    {
+      "name": "TwilioKeyDetector"
+    }
+  ],
+  "filters_used": [
+    {
+      "path": "detect_secrets.filters.allowlist.is_line_allowlisted"
+    },
+    {
+      "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
+      "min_level": 2
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_indirect_reference"
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_likely_id_string"
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_lock_file"
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
+    },
+    {
+      "path": "detect_secrets.filters.heuristic.is_potential_uuid"
+    },
"detect_secrets.filters.heuristic.is_potential_uuid" + }, + { + "path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign" + }, + { + "path": "detect_secrets.filters.heuristic.is_sequential_string" + }, + { + "path": "detect_secrets.filters.heuristic.is_swagger_file" + }, + { + "path": "detect_secrets.filters.heuristic.is_templated_secret" + } + ], + "results": { + ".env.example": [ + { + "type": "Basic Auth Credentials", + "filename": ".env.example", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", + "is_verified": false, + "line_number": 12 + } + ], + "ENVIRONMENT_SETUP.md": [ + { + "type": "Basic Auth Credentials", + "filename": "ENVIRONMENT_SETUP.md", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", + "is_verified": false, + "line_number": 9 + } + ], + "README.md": [ + { + "type": "Secret Keyword", + "filename": "README.md", + "hashed_secret": "c1e21d39ae6739d371e90119cb555f4c177e25d7", + "is_verified": false, + "line_number": 61 + }, + { + "type": "Basic Auth Credentials", + "filename": "README.md", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", + "is_verified": false, + "line_number": 124 + }, + { + "type": "Basic Auth Credentials", + "filename": "README.md", + "hashed_secret": "c1e21d39ae6739d371e90119cb555f4c177e25d7", + "is_verified": false, + "line_number": 128 + } + ], + "alembic/versions/46022896a035_fix_uuid_primary_key_default_factories.py": [ + { + "type": "Hex High Entropy String", + "filename": "alembic/versions/46022896a035_fix_uuid_primary_key_default_factories.py", + "hashed_secret": "cf77e9d9553517d4e9a35ac59ef314bca9f007db", + "is_verified": false, + "line_number": 15 + } + ], + "app/config.py": [ + { + "type": "Basic Auth Credentials", + "filename": "app/config.py", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", + "is_verified": false, + "line_number": 34 + } + ], + "pnpm-lock.yaml": [ + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f8c3bceb2fd5655f1c6e2c8b68c19ccc62e2ccd0", + "is_verified": false, + "line_number": 132 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a6b7a193f9685d14571c31a6a1b3add5e7887856", + "is_verified": false, + "line_number": 135 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d4eac0bf704336f38e69fd6832e6f021ded39602", + "is_verified": false, + "line_number": 138 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dc4d28f9890d4fa2dfd62af6dad92301d743197b", + "is_verified": false, + "line_number": 142 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5d38e92aeafa91e0b0d9dca469391b9cdfe9dcfd", + "is_verified": false, + "line_number": 146 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4a9ae62604ff3094a86888165571aedf55bb29b1", + "is_verified": false, + "line_number": 150 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "91ef8efb1f1a9f7b957debc2f9382278a11e9e91", + "is_verified": false, + "line_number": 154 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5cb278a68156ae376bea48fb0f872cbc5dfd0a63", + "is_verified": false, + "line_number": 158 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"1ebe57ea46fba7d746cb222ee21b5cd413c80ff1", + "is_verified": false, + "line_number": 162 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1fc27c75818896b4e84d7e8ca98e0e8c00118bb7", + "is_verified": false, + "line_number": 166 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e844f3c78344b978e74bc22b6c2736d4b0c393e1", + "is_verified": false, + "line_number": 172 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8319864fae2ce720a9588f890930b4dd3f736a4f", + "is_verified": false, + "line_number": 176 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3e2a4d1a24c1c4aac8dc6ee6056c8a135c8db64a", + "is_verified": false, + "line_number": 180 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6302a9750ba626bf9d98ea2b93c8044c2c13dd03", + "is_verified": false, + "line_number": 184 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "aab5f636e7355b605c94009444540cb86f20a48d", + "is_verified": false, + "line_number": 188 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8570cc5660ba5cb0957e273be0100cce91506931", + "is_verified": false, + "line_number": 192 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7bd7cab0500b448b5af6034691432c96b964a4c8", + "is_verified": false, + "line_number": 197 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3c07010114b3f8a05c537e24dbca7494bcf3c0a4", + "is_verified": false, + "line_number": 202 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6345a5a64b6e2e20c37de7f332495c74b7e28b8b", + "is_verified": false, + "line_number": 207 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3c39819cbcfb00ae8e9393a2055d140e03e87570", + "is_verified": false, + "line_number": 212 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7e6fee16d726ee6db2c1cf93104e5e939ac32632", + "is_verified": false, + "line_number": 218 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "eced9884adaa31fbecade795c3ccd81df9ba9fcc", + "is_verified": false, + "line_number": 224 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b2fbdf7c2e16e91a406593fec247dad23b5efbf8", + "is_verified": false, + "line_number": 229 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "01fb43d387fe0b3ac3455f566053a73bd219b7f3", + "is_verified": false, + "line_number": 234 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a3d57e425e9c3b5ab0b18abe7b8621625cc30c22", + "is_verified": false, + "line_number": 240 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d796b7b26f7703279b30e1ce28d78e9b21aa7682", + "is_verified": false, + "line_number": 245 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8eabb20032175f899539869abd92d947b1089edf", + "is_verified": false, + "line_number": 250 + }, + { + "type": "Base64 High Entropy String", + "filename": 
"pnpm-lock.yaml", + "hashed_secret": "020015c71e54dae0a6e3c559389a0f833b37c586", + "is_verified": false, + "line_number": 255 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "16d2494dcd526d2ba248b35aef5636b20af470b5", + "is_verified": false, + "line_number": 260 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "14b6efeff14f789b722c80b84a350f8e81a98866", + "is_verified": false, + "line_number": 265 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e883069b4b8d57f17deab7c65aa5ae60f9c7e454", + "is_verified": false, + "line_number": 270 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7dc4aaf477bcf07897d462bdbf274b1108e666ce", + "is_verified": false, + "line_number": 276 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ecee7d3f33e999a8b3f8bc7f80c60d87a5158087", + "is_verified": false, + "line_number": 282 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "adc1d52f38ba77308117df559dea25e43eb9f263", + "is_verified": false, + "line_number": 288 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "67428c90207cc195acaf62942754b6a6861c8b90", + "is_verified": false, + "line_number": 292 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "de045e2ed3223c2ab40b541eb6a518fd1a3ad0f2", + "is_verified": false, + "line_number": 296 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "73b60e42d9313eca080440873f8fa1fcb5310f29", + "is_verified": false, + "line_number": 300 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "76a9ef26365296802b842dcf52bb19db8f769d72", + "is_verified": false, + "line_number": 304 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "68c49c8729d0ba1af9fa5fdfbec8da631ab3fd95", + "is_verified": false, + "line_number": 307 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "284e328f50abcdb9226644add2a3cf722e4653d3", + "is_verified": false, + "line_number": 310 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b77bcf9cfa531e940fc298899d0080339a73c0cb", + "is_verified": false, + "line_number": 313 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e8cd0c3094835591836c8fa7935c7bbb6ee65add", + "is_verified": false, + "line_number": 317 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "61dd02eaf9c90ea54d73e26217afccd18e1c7af1", + "is_verified": false, + "line_number": 324 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "af969ffa1ace7a92c94e95f67c7a11b1094f3757", + "is_verified": false, + "line_number": 331 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "196c34fa3c4c886415ff6da9263c5c5e6060cd42", + "is_verified": false, + "line_number": 337 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e14ced80698d248c765e292ecbf834b66bd0f16b", + "is_verified": false, + "line_number": 341 + }, + { + "type": "Base64 
High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f6a92c82ac2c20a614670b334108e7b491bd56bd", + "is_verified": false, + "line_number": 345 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9c9571c32c7a0300f9c1c66dc4062a911ac67a43", + "is_verified": false, + "line_number": 348 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "42c4da369db409dd68c843dc72292194221cb0ab", + "is_verified": false, + "line_number": 351 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3a7274dbc91ccac04e5d84ae6dde2b4edac928e1", + "is_verified": false, + "line_number": 354 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d93a0ad127212c5aaed4068d98a04570b4b279ac", + "is_verified": false, + "line_number": 357 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8020bacb068c6b5395bb2c1fb628ac443f630bd1", + "is_verified": false, + "line_number": 363 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9492fa339d21b43736ef7383d1fba3c3b6c5ee67", + "is_verified": false, + "line_number": 367 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c1939e055acf8b0feab5b14c183a340ca6b5c7a5", + "is_verified": false, + "line_number": 376 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9fb8dff15b72bd7f1ff7c2f66acc05a00dd2cec8", + "is_verified": false, + "line_number": 380 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "56d22aca0afbda06cdb1a54ee7ae2d4f2d5112bc", + "is_verified": false, + "line_number": 384 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7e4e4ad32bcb306a8bd06c1389dd92eb445f7d8f", + "is_verified": false, + "line_number": 388 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1d2afd1a5616f4b51d5d801944a2d6568f66bf64", + "is_verified": false, + "line_number": 392 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fa33ce1f1f2b8288b39378c0f919a83f53e512a2", + "is_verified": false, + "line_number": 396 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "49890041c2f3ef27751c5dcec8e0f0501e43217f", + "is_verified": false, + "line_number": 400 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "480e746b8279a6fb10e1f99654332b595e81ef58", + "is_verified": false, + "line_number": 404 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a12c33144808266e4f61583195ce37e7013210da", + "is_verified": false, + "line_number": 408 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d2162ede46b32d726584142e374458026c95c209", + "is_verified": false, + "line_number": 412 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7252305e0579e7dd1ebf234edb891484f3ac7a71", + "is_verified": false, + "line_number": 416 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e2aa01d8e0e744299d1067b8a7316b20f90c08d3", + "is_verified": false, + 
"line_number": 420 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "966109ea059dc0878120e8f226aea4c23df64a32", + "is_verified": false, + "line_number": 429 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6d7a814b0f6d2d90597f8b010cbc8c6bfe6378a1", + "is_verified": false, + "line_number": 438 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a499da219a17b37305a4955e7bcdd1036990b4f6", + "is_verified": false, + "line_number": 442 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ac62fa2d9a7e307af9f1c1a506746ff5f6fe9a4f", + "is_verified": false, + "line_number": 451 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3bb33b13ae0deae68b66db8c03fc8b432b0e7c8e", + "is_verified": false, + "line_number": 455 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b62ae999218390b924a5304938e497fd61ec4e66", + "is_verified": false, + "line_number": 459 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1119596e8df549d5d1b2e83f34912cc0c84222d5", + "is_verified": false, + "line_number": 463 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c1d423d94eec2217b76cc22b4dde6962ba834b60", + "is_verified": false, + "line_number": 467 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "95727eeefae9320dfc3b39ce03f3f3a27e512b2c", + "is_verified": false, + "line_number": 471 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "562d542f061119bd1c12cd070dc95fcff6c78725", + "is_verified": false, + "line_number": 475 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8fd4ff49a8b64e198c4fb9a79cad0fa39e3fdb7e", + "is_verified": false, + "line_number": 484 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b517745fbe126a497c163e6e974e9726b70b3bc4", + "is_verified": false, + "line_number": 488 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "353f0591d50d7f47f92ad4df1e881f7be8d9c02f", + "is_verified": false, + "line_number": 492 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a419f5db63e1b4c3eacbef951020b96b3e3525e7", + "is_verified": false, + "line_number": 502 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3b4eb869d4e5a76988371a2f91d6beb3864a9c05", + "is_verified": false, + "line_number": 506 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cf8ce0998d339a76912858591cfbcc4b0087cb13", + "is_verified": false, + "line_number": 510 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e04b9eb3cacf1761364c4edf3ff76863465136de", + "is_verified": false, + "line_number": 514 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "93ddeebaa35c6f0b01a9c74fe112c0cbbd747582", + "is_verified": false, + "line_number": 518 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"09b311eda56491e1118066ff3b4c2096582e9ae6", + "is_verified": false, + "line_number": 522 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f099f09661dfdbd2e8b725d48b333710210cf2d4", + "is_verified": false, + "line_number": 526 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f7c5dbf084a28d955a89d79f21bf1944ff5d28e3", + "is_verified": false, + "line_number": 530 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fcf9843ee5d7fd1e0381fe3a4bc404adce5086a0", + "is_verified": false, + "line_number": 539 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7d0516db23602e85bfd4ee968d0ed9d0fede2174", + "is_verified": false, + "line_number": 543 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "050894b0eaad3fd5ffb8ed06d319ea9d5c5772b7", + "is_verified": false, + "line_number": 547 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4a6b8e2be6e08dfcba96ed7d7d379786e4541f9f", + "is_verified": false, + "line_number": 551 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e411b2673151688be228dac74c38859a2f8b4296", + "is_verified": false, + "line_number": 555 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "75f6560d16756439bc5c52ec8afd7d01778cbc91", + "is_verified": false, + "line_number": 559 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c7c3d7f29c24cf381fce770dcdc76fef1f34c51d", + "is_verified": false, + "line_number": 563 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "baf5f86d7342069adfbf31f368aa72134e1a471d", + "is_verified": false, + "line_number": 567 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8733ba82d33885a92ca3009eb6ba61e505a1ee9a", + "is_verified": false, + "line_number": 571 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3e3522c83d724aa4133ee6bb6d627e4390be31a5", + "is_verified": false, + "line_number": 574 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9d1b9dbec9f048e048f4dd38591947ca7deff5dd", + "is_verified": false, + "line_number": 577 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "56f2c933b7afa8e13bf7c81e684d7adfdc945431", + "is_verified": false, + "line_number": 581 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f60c00cacddf514ad090fbde7005d8721adc0b89", + "is_verified": false, + "line_number": 584 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5cbb7677d5d0d783962ad9b88b167745805f5b8e", + "is_verified": false, + "line_number": 587 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d46c14589f8c831f97f886351b71d1bfaa77050d", + "is_verified": false, + "line_number": 590 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e4783c26733aeaeebcf5bddec5a8cec60cadfcc7", + "is_verified": false, + "line_number": 593 + }, + { + "type": "Base64 High Entropy String", + "filename": 
"pnpm-lock.yaml", + "hashed_secret": "e5e5262a0274c7848798c24b5c528b3766de597e", + "is_verified": false, + "line_number": 597 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1f9fcb738ea57bae77acc259bd652890f8ede593", + "is_verified": false, + "line_number": 600 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b758b5c1440cda3d90c91e5a525f0292d24f12bc", + "is_verified": false, + "line_number": 603 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ebed4bcc0633208a30eb5a5dbfeaa7364612a05d", + "is_verified": false, + "line_number": 607 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9a1373fb8bc6343a2626079c5c977463daea3541", + "is_verified": false, + "line_number": 610 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e768776e74a3cb5645d538a9bd074c78e7fb52b3", + "is_verified": false, + "line_number": 614 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "150a1608ffd7395d510860bc11f1b69b49ff015a", + "is_verified": false, + "line_number": 618 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e86e4979d9d712e11a75b4ebec1b901732124d77", + "is_verified": false, + "line_number": 622 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e7a300573911dfc80ae0808f6cd3ddc96c90975d", + "is_verified": false, + "line_number": 625 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "af21ea4772d3ec7feac09cde495c71292fb69903", + "is_verified": false, + "line_number": 628 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "be9f24635e32f68141a9ae50e377a146bdc3fda5", + "is_verified": false, + "line_number": 631 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0c389b15ae6e0c8d0bb1355bb3a83ed3134d3d87", + "is_verified": false, + "line_number": 635 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bcb82578fa8e86968c7019febdeab3965c19ff41", + "is_verified": false, + "line_number": 639 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "65c56ba331d8f8e709c231fda1c1fee1d788dbc5", + "is_verified": false, + "line_number": 642 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d032743174b8f7e110e192c6b48ccdb566919c1d", + "is_verified": false, + "line_number": 651 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4ebad0bb8158d7902ef7c07c6db58469e5fd004d", + "is_verified": false, + "line_number": 660 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8e9b2903440cf96dfee52a171c0559db79f0773a", + "is_verified": false, + "line_number": 669 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cf2041285173d49cb7dfee2323f78d0b404a3638", + "is_verified": false, + "line_number": 673 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "20107548d6d6094b3ab1e4178c7af6afbecc0ce9", + "is_verified": false, + "line_number": 682 + }, + { + "type": "Base64 
High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "749dbc49e29a2c48ba9a541b4d4c4a4446ab2397", + "is_verified": false, + "line_number": 687 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "69d6de7ec6d70163434cba5fc9ed8b6638deb702", + "is_verified": false, + "line_number": 692 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a8732996d4ee7f5899c1ddc7c532c7684475de80", + "is_verified": false, + "line_number": 697 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "05772f995c2616ab707f12cf35fc2d0392be5bde", + "is_verified": false, + "line_number": 702 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9d72eadbb695182a11da93e470b948cf38d9d726", + "is_verified": false, + "line_number": 707 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "05c55f65095943a1e18cd229b5736f428214fa28", + "is_verified": false, + "line_number": 712 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4120a2123079a499cbdeba4626f789a9e1cd6053", + "is_verified": false, + "line_number": 717 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2416571aca8c7ebc196722497d2bc16a10c5743b", + "is_verified": false, + "line_number": 722 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e3f8f493b5b6865b2abe74d73967d507b55be6ee", + "is_verified": false, + "line_number": 727 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e3d33900147d0b1a9b74effcaca98b468c7a8bcc", + "is_verified": false, + "line_number": 732 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1c885c0893086be42972a51bfe255dd7668cb3de", + "is_verified": false, + "line_number": 737 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "06fa9c785b4dba978e051680cb9f34a97ae7d239", + "is_verified": false, + "line_number": 742 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "76b0f50a39c773aedb96ecfbd921e1553c59a5f4", + "is_verified": false, + "line_number": 747 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "30ecedfb22a8a9183ce8b205e95a745280862f5d", + "is_verified": false, + "line_number": 752 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4b064f4e71c818c0bea4e489dbe82438530d36a7", + "is_verified": false, + "line_number": 757 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8623d35228ae126aa77a54451c596ce57df02b5c", + "is_verified": false, + "line_number": 762 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5768f4e12742da05a321e837697907d26462a893", + "is_verified": false, + "line_number": 767 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a3a479dbb3ff3ab7dace9df18a243a3fc3edc2e5", + "is_verified": false, + "line_number": 772 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8a90b0da4c9d03c9326e4e19b6cb5da440ac389e", + "is_verified": false, + 
"line_number": 777 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bee0ea468bf427ffe30a5c7fb595ed4624b539a5", + "is_verified": false, + "line_number": 782 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c77d05ba2971231ba9416f3b354518c0813e55e8", + "is_verified": false, + "line_number": 787 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b5133c9d969a8874db70acaf0bf0d0be17e28e7a", + "is_verified": false, + "line_number": 795 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "483582ef48e01288826dca781d74ae538aa91ad5", + "is_verified": false, + "line_number": 798 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4afb4366ef15b25dbb41844208be09a11a3a8e1b", + "is_verified": false, + "line_number": 806 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bc486c7ee8c6e705c40702113d890eb90f3495b6", + "is_verified": false, + "line_number": 809 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bac9e90033a09dc12f78d03d7b5e0acc677f0561", + "is_verified": false, + "line_number": 812 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f56458b615158e773335872b6b622996a1d2bc18", + "is_verified": false, + "line_number": 815 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0a440a4af470afd5385d77ec40dadb271c54ad9a", + "is_verified": false, + "line_number": 819 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2fc318592a3a1c436bcfdad7655bcf9e46938a65", + "is_verified": false, + "line_number": 822 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "67e6125078dce659f530f9932b974eaf56fe9aab", + "is_verified": false, + "line_number": 825 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ed0c8ff0a4cb7121759cb041ec471a92b3a8145f", + "is_verified": false, + "line_number": 831 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2a162eef39db8c87ccc129c3b589ff4c8fe2df92", + "is_verified": false, + "line_number": 837 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "00dd5bb98da0d70ae235f8527d1b2557f7fd74f1", + "is_verified": false, + "line_number": 843 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8ee0e0a0aa7ade61fdef7c3698448131dfdb1501", + "is_verified": false, + "line_number": 849 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "74094e2ee2360fc78734c53a07d143c3119e3bff", + "is_verified": false, + "line_number": 855 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "02aad1dead61b55fdbd00e8286bfa93cfbb8369e", + "is_verified": false, + "line_number": 861 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2e81c8e94ae84352ed1912e50cc2d47e410f61de", + "is_verified": false, + "line_number": 867 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"18698daa1b9855fb898001aa0688729b130cc8cf", + "is_verified": false, + "line_number": 873 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cfb1f0b562c2d59ddd1181b0638df2344d835e63", + "is_verified": false, + "line_number": 879 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b64ea10e6e84902e72321e8146291db23d0e72ee", + "is_verified": false, + "line_number": 883 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6212f9d3382a4d8ca9c1a1ea3f378fc05759db2e", + "is_verified": false, + "line_number": 887 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4be72c421a6098afc948ccf07238ff97f773147d", + "is_verified": false, + "line_number": 893 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e4c091ae78635cb31563e131c514206e808d2b28", + "is_verified": false, + "line_number": 899 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "08804f1af65c8031e075c7c94da30f32c5543783", + "is_verified": false, + "line_number": 905 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "31a50e754a720589cf4edcc9d94abfde752bc4d4", + "is_verified": false, + "line_number": 911 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a12331e760bc2c23623c85717e10908c4ae3542a", + "is_verified": false, + "line_number": 917 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7515ca041615c0ff98a6326433c77c52844c3fda", + "is_verified": false, + "line_number": 923 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c118bc7023bc725bca31941c95f76c782a5c3d13", + "is_verified": false, + "line_number": 929 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b27ac4fa627ec96aaeed1909dc05c85d807351a2", + "is_verified": false, + "line_number": 935 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "69f2d81ab827bdf98dcc1e45c1bc9fbdae8d3ccb", + "is_verified": false, + "line_number": 941 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "04b5169bec247412f71bfe7d34d97c62f380ef08", + "is_verified": false, + "line_number": 947 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4bf905d1a8bb4289cf9dc12c471e290c8e742658", + "is_verified": false, + "line_number": 953 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2b593dae70827750630dabb305283be02cfa35bb", + "is_verified": false, + "line_number": 962 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "79e9723e3b770334c12cd5b91ed958be94ecb201", + "is_verified": false, + "line_number": 965 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "289d98544273b403db1986f3a58d231a58a63912", + "is_verified": false, + "line_number": 971 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "de19d990dabd6918c63f5823a5f80304831ab8ae", + "is_verified": false, + "line_number": 974 + }, + { + "type": "Base64 High Entropy String", + "filename": 
"pnpm-lock.yaml", + "hashed_secret": "36312ff7e86f5164146c184422d8c69f4341abf3", + "is_verified": false, + "line_number": 978 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "52b2f78c97766d4baf87093b060f8787fb516b4b", + "is_verified": false, + "line_number": 982 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "00eea5dd64958a54f4076af3b0d989b9028965ac", + "is_verified": false, + "line_number": 986 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d7c304fb1291c553c976795f5253f3261e12a941", + "is_verified": false, + "line_number": 1001 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fa6af34f01a24a0985eed505c4c407f1e4d2af9b", + "is_verified": false, + "line_number": 1005 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "24315e1502c9cc3f79c4c9fbfa2990a980dc8661", + "is_verified": false, + "line_number": 1008 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "23b847f3370f612113c53bca00cffae8e46fb58b", + "is_verified": false, + "line_number": 1011 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "50a0bc6ed4bd15cd54a9ef3e744d9e367b1ee0b2", + "is_verified": false, + "line_number": 1014 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f161e2b04f8faa8a514998b846300c0e52e3dde4", + "is_verified": false, + "line_number": 1017 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "21a255bacab0430cf0dd455985ab2ac363136cdb", + "is_verified": false, + "line_number": 1020 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "146791ae0b950340d17de1ffbda27158f3e8be1c", + "is_verified": false, + "line_number": 1023 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b12ce1057b41d0d8f07977de26139a24e65b12a6", + "is_verified": false, + "line_number": 1026 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e2c0a2b9eede183b6ec04f5f18ce0347d0ff98d8", + "is_verified": false, + "line_number": 1029 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "904dae7e3147870781ad476afdbc281061d76e62", + "is_verified": false, + "line_number": 1032 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d44a64c0233f7f71e6ffa1c3f65dcb001fa2dbe4", + "is_verified": false, + "line_number": 1035 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a3d0017933e859919c4c84d59093f79837ca3cd5", + "is_verified": false, + "line_number": 1038 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "959766c6a6b6cdf60c93ebc380814ffaa67d347c", + "is_verified": false, + "line_number": 1041 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d3752c4bb2cbe605385ab0dc0abfbf416925518a", + "is_verified": false, + "line_number": 1044 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2188b76f26f69ac9b3d1248fc174b0c6d3e841a6", + "is_verified": false, + "line_number": 1047 + }, + { + 
"type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "545a34d82a69ee367dc06267bd30705fc686e387", + "is_verified": false, + "line_number": 1050 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6fac840f70394d5613204d9ab0ac3efb6d2002c3", + "is_verified": false, + "line_number": 1053 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0ae75db1affde490fb9fc8f8b3fa4fdd11af62bf", + "is_verified": false, + "line_number": 1056 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7d975c98c254e3650d5088a2052e5841352de6a4", + "is_verified": false, + "line_number": 1059 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "58ad375918964945ca2addfe6681e5b4a970b0cd", + "is_verified": false, + "line_number": 1062 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4b71247a0a1153b33d71d778df9852df8fee1b58", + "is_verified": false, + "line_number": 1065 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2da4c1e543ab1e27fdbf23bfd19bebe804df57f9", + "is_verified": false, + "line_number": 1068 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3f25df454efc56dbfc9d363a2e994953d9a85580", + "is_verified": false, + "line_number": 1073 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8adf109486cc5abc4d522cbebe7b882f20ba76d5", + "is_verified": false, + "line_number": 1076 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ce6f98d29f348aec2e240eac697905732b103eba", + "is_verified": false, + "line_number": 1079 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d98726a712f2fd349258079c22cd37c91c6d3213", + "is_verified": false, + "line_number": 1082 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a8018e9c758cbf663fc1e4faf9c31b0d7a95615e", + "is_verified": false, + "line_number": 1085 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6333c496a262346ec033c10a9da94bcaeaab10b3", + "is_verified": false, + "line_number": 1088 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "91ac182754770244539b3544b6e3a16f33f7a622", + "is_verified": false, + "line_number": 1091 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "470f58e25b90ff2b0f388302d0e2a326f8e86f6b", + "is_verified": false, + "line_number": 1094 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8d73db03d596fc2383cce18710ce6efa95a1543c", + "is_verified": false, + "line_number": 1097 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7447b1ba0a8be59be886ddac716451a19dd89c79", + "is_verified": false, + "line_number": 1100 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fa1b426a7539a00f814bf0e558603b37ab92a493", + "is_verified": false, + "line_number": 1103 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0610fe0fbbc6a1aa9a13f632a681af96f02e6321", + 
"is_verified": false, + "line_number": 1106 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7b1b461fd86d4ff2979e7bd82539bc8bf4d3c39a", + "is_verified": false, + "line_number": 1114 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c42eac7e97fa63a58a8ff77f2a7ec8ed8666a20e", + "is_verified": false, + "line_number": 1121 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "72e333bf6fbfde873dcbed76ea97a5da29cb5353", + "is_verified": false, + "line_number": 1127 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6dc4a0ebfcaedc3393942538f369e02e18de4b42", + "is_verified": false, + "line_number": 1131 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a4f86a219bd4dd188ad14e6a4b530927fab940ec", + "is_verified": false, + "line_number": 1137 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "29fea7ff4b89ea0f15e6e7ae32c3f36ff055e553", + "is_verified": false, + "line_number": 1144 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "972fefd4a3780bd5a8edb900986d9f85f76f8340", + "is_verified": false, + "line_number": 1148 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9a76fc33440eb76f7471fc5370e379a9260f0ed8", + "is_verified": false, + "line_number": 1154 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a30ef283d9bc84cf9e03cd19737c321881e5836d", + "is_verified": false, + "line_number": 1161 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d9ea4a68b988aca799b7049c045c05d7f5eb839a", + "is_verified": false, + "line_number": 1165 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8842ad0c2b8591c188d31740af8c97f7497952f5", + "is_verified": false, + "line_number": 1168 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "843f6adc83e5bab548b2c1780b57d389041c7960", + "is_verified": false, + "line_number": 1173 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0bb042b843573172471a24b57b84d54ba2ae20a7", + "is_verified": false, + "line_number": 1178 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "24a26fde2915eb83aeb5b58047c67633448aadcc", + "is_verified": false, + "line_number": 1183 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "14bbf3e2247b687a2073aeedfe1a1bb199c7cd5f", + "is_verified": false, + "line_number": 1188 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ba3151c71df586ac67f0e20bb86af1f1841c695d", + "is_verified": false, + "line_number": 1193 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5a01aaedf6ce0250d934af8db3d9c5d37b3cfa6f", + "is_verified": false, + "line_number": 1198 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6f2cdb3f3bb55a86fc6718198490e5de840d6cc3", + "is_verified": false, + "line_number": 1203 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + 
"hashed_secret": "e4bb77220eb431a8e23d0448d352aa6798396f49", + "is_verified": false, + "line_number": 1208 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8b907cff5099cbd0c2dc3b29fc93b875d267d846", + "is_verified": false, + "line_number": 1213 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fdad62357203dd309d14a7656ef638dddfc6a4f4", + "is_verified": false, + "line_number": 1218 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dce5179f9c57f3995ad68bc6f7cdb0f402344020", + "is_verified": false, + "line_number": 1223 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e431f461f14ae4aa92ca9a304256467d315385d6", + "is_verified": false, + "line_number": 1228 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5a5c88a7b7a3cbb7d79e3c4eb272e2a49fc4f99f", + "is_verified": false, + "line_number": 1233 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d8ad402d1afd4eebda4d52652a5b1285a787d171", + "is_verified": false, + "line_number": 1238 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9b4737273b58c6b6b505f753a93507043228b137", + "is_verified": false, + "line_number": 1243 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "29338d4886dd68d07f58cdc0371a13c35922a199", + "is_verified": false, + "line_number": 1248 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f6c2ddf97f92d4d8afee3ab85e40c99943258b41", + "is_verified": false, + "line_number": 1253 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3880ca9edb15902cf3bdbc88e95723f18a879154", + "is_verified": false, + "line_number": 1258 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2f582518a7d025e7efbc4ef2cf8cc5cf9487b200", + "is_verified": false, + "line_number": 1263 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "30dff6e111c0a18b53cc71da853bac87f73a816c", + "is_verified": false, + "line_number": 1268 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c4500f6ef6d71520147cb4cf42394ed23891d064", + "is_verified": false, + "line_number": 1273 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9abc966298ceb3420110b6395ca5a9763974dcd2", + "is_verified": false, + "line_number": 1277 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "185bb3f8f27cbb091b79657e1a37873a0771462b", + "is_verified": false, + "line_number": 1281 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4f6636587fecd2341ba754a447e6a26db011795f", + "is_verified": false, + "line_number": 1289 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0c7f66893fe508baed443a4aad6003315428df3b", + "is_verified": false, + "line_number": 1297 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8559efc4c84b658659b90df5769175380a615150", + "is_verified": false, + "line_number": 1300 + }, + { + "type": "Base64 
High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4b35ff5b44d440d1ca230edd3335485c48bb201f", + "is_verified": false, + "line_number": 1303 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0499115048e37ca7a9a4bf376f60bc55e2efa76e", + "is_verified": false, + "line_number": 1306 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2ae70d048531371eeb026243778c761c5e8f33e3", + "is_verified": false, + "line_number": 1309 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9ffd4cfc9764a8a508ca76013e3d2e89db9f7ca1", + "is_verified": false, + "line_number": 1313 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "329a72857a5b286e373ba3b8aed6d0c92e16aa93", + "is_verified": false, + "line_number": 1317 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "35ca90e5fa9b87427a5ff8b3f333a7b7d45fc260", + "is_verified": false, + "line_number": 1321 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2b152257b2813a0c916c41c56987267bea0c935f", + "is_verified": false, + "line_number": 1325 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d15b82853a984c45aab87399878c98082bac85ba", + "is_verified": false, + "line_number": 1329 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "371ce323d657e273939fdad274085642a4da69de", + "is_verified": false, + "line_number": 1333 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e18dbe67238cfe0349177bd2fc9a70a93e4c2f12", + "is_verified": false, + "line_number": 1337 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "69508e9f25ead6b12c7a762c93b2a5b9cb587d60", + "is_verified": false, + "line_number": 1341 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0ca8a2e0847568b5b2783821e1c2e2004521e41d", + "is_verified": false, + "line_number": 1345 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d2eb4e9361fd3c06052590c09d8b407f1753b2d4", + "is_verified": false, + "line_number": 1348 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "22bd0abeadedbe4ac89a1415cff3788f50a4dc48", + "is_verified": false, + "line_number": 1351 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d032728a18c71c0a8e3f18b060f12d1ab9755c47", + "is_verified": false, + "line_number": 1354 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b8434babdebe87a28a32c342a9ed215e5fe43378", + "is_verified": false, + "line_number": 1357 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "494929ed66699c5de7dc97fc5e805fbafd6309f5", + "is_verified": false, + "line_number": 1361 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ab4554cb39629a429af7df5e2a1f5ecca900b84f", + "is_verified": false, + "line_number": 1365 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0936eedfa1422bfdd70867bcdc72323a7ad078fa", + "is_verified": 
false, + "line_number": 1369 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "47367337ff711b3771ebd5cd4b0c0db003ba9afb", + "is_verified": false, + "line_number": 1373 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "76d488d765b982facf35ed8c7d065c532fe88024", + "is_verified": false, + "line_number": 1377 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3deaf7a75f27491c72c104d279d3c8964b3d2bcb", + "is_verified": false, + "line_number": 1381 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "454c1d2deddffe9f16a5891c973c1cb806c667cb", + "is_verified": false, + "line_number": 1385 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "04c679c2b5720029dfda67cbce9091366b6ecb16", + "is_verified": false, + "line_number": 1389 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "288917f58bc65fd73334ce36e73d451608ed38a4", + "is_verified": false, + "line_number": 1393 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ffb034395898c52a3e20bb08a7dcb16ad43f4445", + "is_verified": false, + "line_number": 1397 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3b854e68dee708dbbbded718c296e6eae040fb04", + "is_verified": false, + "line_number": 1400 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "da3c2503d0bd44fe1cf4f3722696fc3e6f28cf38", + "is_verified": false, + "line_number": 1404 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bddcfb0f33e809897acef908b3bc5a5467377f8b", + "is_verified": false, + "line_number": 1408 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "599395997c127ee79a03df8cbc9c57d9c5c4b39b", + "is_verified": false, + "line_number": 1412 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "31824d618e388ae8df901316a17abfb59c88412d", + "is_verified": false, + "line_number": 1415 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5b22b457e591da89e3abe408798aab51efff2811", + "is_verified": false, + "line_number": 1419 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0be938c821b8b43c3168a92cd6413c9001269780", + "is_verified": false, + "line_number": 1423 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "25766437bfd653448e9869885709725b8e7daefd", + "is_verified": false, + "line_number": 1426 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b266076ae77a0bf9fa35e37377e69a138334854c", + "is_verified": false, + "line_number": 1429 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "113c50d8c9756e7d1594381f7c10a72ff3a59aa0", + "is_verified": false, + "line_number": 1432 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a908cc08ef7b2b82b091f46184f979afee56786a", + "is_verified": false, + "line_number": 1438 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"0c4bda37196b07e093d1df6a337e2692c138929f", + "is_verified": false, + "line_number": 1442 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f8e687b2ede1bb7958e58e75e4da62a0c8e37f0a", + "is_verified": false, + "line_number": 1446 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4fdbdcbfb958dd1b903e4bb656ebf20b9a685938", + "is_verified": false, + "line_number": 1451 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5b7cd7946ca5e56496386ea38016606310bfcf03", + "is_verified": false, + "line_number": 1457 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7bb507ba2f47095177b03051a6340091be5871b2", + "is_verified": false, + "line_number": 1460 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d0b16bb13a7e571c3825930fe1839723bfbc27a5", + "is_verified": false, + "line_number": 1463 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6218cfe8ba1d7ca732ab6609d9fac82f424d34db", + "is_verified": false, + "line_number": 1466 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fc552bd96df19074c6deb25a11f515a6339037be", + "is_verified": false, + "line_number": 1470 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e2f8ed204b081d28612cca592b04d252763cfd43", + "is_verified": false, + "line_number": 1473 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0233cd7b0f5db5cdcb21d48d45d4783f847091bd", + "is_verified": false, + "line_number": 1476 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f83c0e1722151b136004f1e67d507feb727e52f6", + "is_verified": false, + "line_number": 1480 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "301ec28c85c9d136ed8b90c2defb08d2d1dc6dc0", + "is_verified": false, + "line_number": 1483 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4cd454d960136b7b4f74cca3c12010ff2ebd7663", + "is_verified": false, + "line_number": 1486 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3ba9641d7a9667c4b850637e48da8cbf35133fd7", + "is_verified": false, + "line_number": 1490 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "39706ecd1dc7f7807e0136ae3d22a50f9a7a6229", + "is_verified": false, + "line_number": 1495 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3df05797ec6bed4d2678569c6fbd357119f3a57e", + "is_verified": false, + "line_number": 1498 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "38119d6755480fd81caed75382e6bcbb3685ca44", + "is_verified": false, + "line_number": 1501 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3187e8d5152febf9bfa7333f8f95e4e0620975c5", + "is_verified": false, + "line_number": 1504 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "13f10bb48d68315aba73d76545a0065931bdd3f7", + "is_verified": false, + "line_number": 1507 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c61921cb03b0d17c9d180fa6d2d765269458f5e9", + "is_verified": false, + "line_number": 1512 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "23d6cd08c42fa08d84ffb1f94a47d6d8c610d8bf", + "is_verified": false, + "line_number": 1516 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6d8533be784cd9088f2080b57f895363771aa303", + "is_verified": false, + "line_number": 1520 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "09ce1f68022d542bd4b111b2e648b4261e7ea537", + "is_verified": false, + "line_number": 1524 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fb9b3f8fc8306134d84d0de28fe41bb5262bfe3d", + "is_verified": false, + "line_number": 1528 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f4713ff6d6ebe4c91eabb4abaa4fff79252287e2", + "is_verified": false, + "line_number": 1532 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7e0c8e7bebd3b26c7a99961ec3498c0448dcb08d", + "is_verified": false, + "line_number": 1536 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d691ee4a30ccf501faf24e54b1ae328144910a9f", + "is_verified": false, + "line_number": 1540 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "accd3d42ae3e5a54f5c871a2c507ec4d5955911f", + "is_verified": false, + "line_number": 1544 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6791035095fb16620100008d8bd175f89259e13f", + "is_verified": false, + "line_number": 1548 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "43cc920fcbc708f7b19ab40becf389608e25a746", + "is_verified": false, + "line_number": 1552 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "63fe679f7b65ba5f52f19e22a4d5aef3ab544c0a", + "is_verified": false, + "line_number": 1555 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5e5e944819f9ed1defbf3b9cf387214fb2f04d61", + "is_verified": false, + "line_number": 1558 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f257f2080ed6e1642f3937cd657e455a8bfbab13", + "is_verified": false, + "line_number": 1562 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f4bc56ad66486cf701c55579bcf67111c569f7bf", + "is_verified": false, + "line_number": 1566 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "39ba3f70609aa7725903712c7cd0a68b269efd7c", + "is_verified": false, + "line_number": 1570 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4116283da073d4ab654db7f17a1325f7d331e9c5", + "is_verified": false, + "line_number": 1574 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "05e4106ccad50ac8213da0921ed6368ea031468c", + "is_verified": false, + "line_number": 1578 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f58ed644b01d5256cbe282a5c56dc6e6d7432e91", + "is_verified": false, + 
"line_number": 1581 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "01dc1ecbf3609e07708cb5d8368f6c964765bbe9", + "is_verified": false, + "line_number": 1584 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1118b1971e21e9244befa36e6c192a8beecaf5c8", + "is_verified": false, + "line_number": 1588 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0d8b99381e4ee2312c7def98de2115a8f75f866e", + "is_verified": false, + "line_number": 1591 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4776a58de368bbe8dd01e87ea42924720de07cc2", + "is_verified": false, + "line_number": 1595 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3d31499415c4ddff0f4794f83866f78b3b2f1a8e", + "is_verified": false, + "line_number": 1599 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0fa396e95b690119dc2b12237947c5443bdbb9d7", + "is_verified": false, + "line_number": 1603 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bfc89ff4ae7819f01e5256e56d0581e46f0e7350", + "is_verified": false, + "line_number": 1607 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "02de50847f9b702b6f06c4bcab73b79022ec1da3", + "is_verified": false, + "line_number": 1611 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "85d82e9210a40d5176663ed85cc1e04288678dae", + "is_verified": false, + "line_number": 1615 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "690af56af1384793272c233c11ffb5d354b49fc0", + "is_verified": false, + "line_number": 1619 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "43e69c85090264b5501c0964aaa3d048ee08fd37", + "is_verified": false, + "line_number": 1623 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ee00fa5b9a854db184c08602b53656ef36185aff", + "is_verified": false, + "line_number": 1627 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "94729895770b7ab7572f90b68f3ed85a3ed06125", + "is_verified": false, + "line_number": 1630 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c5c346219c1b0dcd800c37bca46f05d6cc27fd2f", + "is_verified": false, + "line_number": 1634 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4ed49acfbd180533265d2099f3043963c4a24443", + "is_verified": false, + "line_number": 1637 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "428e72f71455a785963f1df5f85c26c6d6ae519b", + "is_verified": false, + "line_number": 1641 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1dae660604ec052e4c8dfd54b0657636c412a22b", + "is_verified": false, + "line_number": 1644 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c182e71713f8c76e48acdb19a541f4a6da57de26", + "is_verified": false, + "line_number": 1647 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"83307cb75a4a44ba528f4a0aefcec2a8018dc6d8", + "is_verified": false, + "line_number": 1651 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b51ebeb3335950689553d0ff7b21d7ffe5bf40d0", + "is_verified": false, + "line_number": 1655 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "df911743057e957d166c09e0746e85bf596195d6", + "is_verified": false, + "line_number": 1659 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b3a5fe8f11cc3b123e8440a3554d587231a4f9c0", + "is_verified": false, + "line_number": 1662 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "51e587955a294a8620b868666e05d79ba4d70a0c", + "is_verified": false, + "line_number": 1666 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d5abdaffd7f3965d9519f9f54c812eaeefcada00", + "is_verified": false, + "line_number": 1670 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f84a6782dec2fb34f9b02671d8cb7aedca3798ab", + "is_verified": false, + "line_number": 1674 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bc92ce9b854cc5fcabd61439ad8b6c1eb7b1cfad", + "is_verified": false, + "line_number": 1678 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "98da97093cff2b0693b2f8ccc5b7e74758ae9f3e", + "is_verified": false, + "line_number": 1681 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ca5991846a5c9f1941a9f711b31379a95e24f92f", + "is_verified": false, + "line_number": 1684 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fad0b293a732e46ead6eb4c451ec7fb5b727943b", + "is_verified": false, + "line_number": 1688 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "08c8ee0987f57add8cd4071488caef8ec431336a", + "is_verified": false, + "line_number": 1692 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "42cd07a10482e3b99b503ffc28f3e76e8dd1649c", + "is_verified": false, + "line_number": 1695 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "abca484beeee08e082bd184a38f3bf11583b6318", + "is_verified": false, + "line_number": 1699 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0be09dcb2d7e48e38cd4b3bce0788a7a5e8497a1", + "is_verified": false, + "line_number": 1702 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "862951cd71d0412b5e52bae3e952725486a655b0", + "is_verified": false, + "line_number": 1706 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "565f1fcd24069ce5728ff99d1855ca96859b8e45", + "is_verified": false, + "line_number": 1710 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bf2a7aa50ec6063cbb616daadb50eec0213fa61f", + "is_verified": false, + "line_number": 1714 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "54a43bbc3347478addf329caeb7193802e16ed40", + "is_verified": false, + "line_number": 1717 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8b6e3d167f9d34dcb4fb60d814fdfd4b8f850b7b", + "is_verified": false, + "line_number": 1721 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d028d3919e6a5213b2046e06c266ca3453209124", + "is_verified": false, + "line_number": 1724 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dae81dbaf5013ad00f8c2d751b41e68a8e007a0d", + "is_verified": false, + "line_number": 1729 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b2842d1820be756380aa1a279474505df803ec95", + "is_verified": false, + "line_number": 1733 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ac2356a53cdb1ad206f4fd2ea081ff3e52ac6fac", + "is_verified": false, + "line_number": 1737 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6a598f88e6817bef343117e6102c8d5e212b75d2", + "is_verified": false, + "line_number": 1741 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5b74ab04f863a02aef6acd27f8ab86aab825d2d7", + "is_verified": false, + "line_number": 1745 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b579cc9cf455d76457ba6922c763d7fcc554f34f", + "is_verified": false, + "line_number": 1749 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8e948a3b773d1a2e4b6f4220216efa734315246d", + "is_verified": false, + "line_number": 1752 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b549ab7733cde508848088c08a940a1351a7db01", + "is_verified": false, + "line_number": 1760 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "efde2de89be9751c3f5ba6eef72ec684690ba3aa", + "is_verified": false, + "line_number": 1768 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ea4d4ecb6fe3f99049d2eeb60e238904569e657c", + "is_verified": false, + "line_number": 1777 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "68cbc1a0fb6427b2ec20ed08ece8563357e526de", + "is_verified": false, + "line_number": 1781 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cf3e48f5af3fca6bb9135acdee135247bcae1307", + "is_verified": false, + "line_number": 1785 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "eb9cba0903f17c882c113d36617d4c517d93ada0", + "is_verified": false, + "line_number": 1788 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bf81e2bbd66925d3c5159e0a6c30cc7dffa86d77", + "is_verified": false, + "line_number": 1792 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "001d1c5b6dd68dc1c73d24adfd6cef9918a5bc95", + "is_verified": false, + "line_number": 1800 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b0f6a9f09dd8a0ebc50e6d4a7a81e126cd64ac6a", + "is_verified": false, + "line_number": 1804 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c0c7aeed5e5a888b2aa3cdb739955fd27d25335e", + "is_verified": false, + 
"line_number": 1807 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d6ba3ee4287e22325ed322b0e5b279c4bc05781a", + "is_verified": false, + "line_number": 1811 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0d18d461f348476153b9b5741beda942f5b825fc", + "is_verified": false, + "line_number": 1814 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7b7d4712b8cd249ac13d7249d0b3fadaf6e5d341", + "is_verified": false, + "line_number": 1818 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "25a47cccee07333d84961bddca66e8a6eca962e8", + "is_verified": false, + "line_number": 1822 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6263b491d8e05bc0d6bc8b232c3a06044db2232c", + "is_verified": false, + "line_number": 1826 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d031e18cc14e54a719197084af4cd6b2372fd005", + "is_verified": false, + "line_number": 1830 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4fee84fc7c11d191151abd4429b4e2a27dfc9269", + "is_verified": false, + "line_number": 1834 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5cfbb495a02b599314652dfe8204aadb5cd48fbf", + "is_verified": false, + "line_number": 1838 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4ee1accca30cee41b325697775406c5e5544600f", + "is_verified": false, + "line_number": 1842 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0de19c18e237ed835029485aad167c3980b5021f", + "is_verified": false, + "line_number": 1846 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "11a39d8fd6dc21c5f99ded4f4252af741e22ed94", + "is_verified": false, + "line_number": 1850 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c02b39bafb6389010b678e8a4714d2ddfb10fe59", + "is_verified": false, + "line_number": 1854 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "46a4e83f9f3f3670ec1cf4445789431038008dc2", + "is_verified": false, + "line_number": 1857 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a32ffe8d9fdbaeb585585ce8cd7b374a3023dc5c", + "is_verified": false, + "line_number": 1860 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "79119667cecad09809c34832b1f2b650b5ccded7", + "is_verified": false, + "line_number": 1864 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a9828d566602bc6a82937e9aa0c8997ab56802ed", + "is_verified": false, + "line_number": 1868 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2b55a80207531692665a7013085b7c4a9e51b416", + "is_verified": false, + "line_number": 1871 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "da3e732bacfbc1b6e5ac2ec23fd153c53622b4bf", + "is_verified": false, + "line_number": 1874 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"28878216b1c9b355c325467467ebb9802d2c2714", + "is_verified": false, + "line_number": 1877 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4d50b55900617c8d08097c03af9667c135c1fc24", + "is_verified": false, + "line_number": 1880 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2ef4fb56bf7e7819885549cd421d1976a81a578b", + "is_verified": false, + "line_number": 1883 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e59823f0907ab15ce17c9ea2f498697f072d6f0b", + "is_verified": false, + "line_number": 1886 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7cc9386a4d65b391a2cf652229566a5f984de68c", + "is_verified": false, + "line_number": 1890 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fc7b2f07f919c3f8e20aa78a1756d2bcca252fb9", + "is_verified": false, + "line_number": 1893 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b3e321bc2eec51aaeb743b17220649f3d43443bf", + "is_verified": false, + "line_number": 1896 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "21194e556cc6a23d3903b12189b0b9cd1050f2e5", + "is_verified": false, + "line_number": 1899 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fc231c807593551488e1df5cd5d488e39f7db93f", + "is_verified": false, + "line_number": 1903 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "762da446f010b10b3304d4865db3501bc43123eb", + "is_verified": false, + "line_number": 1906 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "79493155bf971702ff00e70c7bd59d1e56c9e40c", + "is_verified": false, + "line_number": 1910 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b32dc7581438839b3baa8cbb9dafcc80017588ea", + "is_verified": false, + "line_number": 1914 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "06b624999b2da9d4a43fbab83803a6e237d87b1b", + "is_verified": false, + "line_number": 1918 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8af94d587ff04c1f1a5c8990e2ade46a8fc41f6b", + "is_verified": false, + "line_number": 1922 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5c013387fcf13f792a50f7cbb6fedeccc848cafa", + "is_verified": false, + "line_number": 1925 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6d1db030d72c8b03fd95eaeb0266255402f70df5", + "is_verified": false, + "line_number": 1929 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cf75ddffbc923b32123e91e069fbcb97ac39c4d9", + "is_verified": false, + "line_number": 1933 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2d014d80f2b2312eaf68f8df2a602ec60758bc11", + "is_verified": false, + "line_number": 1937 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "417a00b911e0b72a50a597cb7c4c70885c55516e", + "is_verified": false, + "line_number": 1941 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e28508e3ee2cfadee43ef9cfb9bb3b5d5d8ef5af", + "is_verified": false, + "line_number": 1945 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e908fcfcc8dff1ea5465cf990602f9647149e74d", + "is_verified": false, + "line_number": 1949 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9a378f73272ce5fb7e4f0f07edd21878eeee81be", + "is_verified": false, + "line_number": 1953 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3cb8e7a708267691af8530db673cae1ad0578392", + "is_verified": false, + "line_number": 1957 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "34ea38e6c1c9a08753e7719df1c86d09e06de5ed", + "is_verified": false, + "line_number": 1961 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cc0394ae9ff7278ac1f8ba8305e06627798ae34b", + "is_verified": false, + "line_number": 1965 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "73ca6edf410b4b5aa1437064ebde3b97f4016449", + "is_verified": false, + "line_number": 1968 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "25f91f21090c20e53ec7b34dbd7db402baf227b6", + "is_verified": false, + "line_number": 1972 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6c88ecf20f9dc070ee18f0cf59f58b0025f6ec55", + "is_verified": false, + "line_number": 1976 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f4e833281d75db65a01c4eb0e1c500917cf05886", + "is_verified": false, + "line_number": 1980 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "317c56ed0774aace8b6a698ca3027eae171f5fd5", + "is_verified": false, + "line_number": 1986 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "da3b519ed1f3003cd9d7b412c8cf21aa0658c315", + "is_verified": false, + "line_number": 1990 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a15181ed229c59a2ab1604b27574f841764159a2", + "is_verified": false, + "line_number": 1995 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ee9d1b64a3dfe95f432a67bb816895c3203aa972", + "is_verified": false, + "line_number": 2009 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "905103ba4d39bf98e3ae61a4846316e59edc5038", + "is_verified": false, + "line_number": 2015 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2a3f54a14501efb2beb680cc6f4204105f71309e", + "is_verified": false, + "line_number": 2021 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "64a5f7db706b0788befaabbc695844da839c85a6", + "is_verified": false, + "line_number": 2026 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f356716cf94345569637c97e678030c8785cfbaa", + "is_verified": false, + "line_number": 2029 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2a5c1a897332b7652d1daeb248e27b29123ca884", + "is_verified": false, + 
"line_number": 2033 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7bf692c49036f6e8ac29f859913d8b5ad4ed34bb", + "is_verified": false, + "line_number": 2037 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "31fb5f6fd3cefd1c5669ad40254a06e2fc811856", + "is_verified": false, + "line_number": 2041 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "40d5009f0315cf3dab4a4d129f8e556b6838d879", + "is_verified": false, + "line_number": 2051 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d8863c346b87ab9e420048134928a830cdb44e4f", + "is_verified": false, + "line_number": 2055 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dec5206f58aa7a9ae27aee17c254c2a8380af599", + "is_verified": false, + "line_number": 2060 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "51e9263c74493cd77c2d25adb255b15647d84512", + "is_verified": false, + "line_number": 2064 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1790c1f42e3416e31561c46b258d2f69c1608c85", + "is_verified": false, + "line_number": 2068 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8ab0d03e8d34c76769e2494c72290c441ebcc749", + "is_verified": false, + "line_number": 2072 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2fcbf434b04ba0a49ae29f1d9ea0edc30881350b", + "is_verified": false, + "line_number": 2075 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e8f5ad74437e14457e2dd858251a95be5935aa84", + "is_verified": false, + "line_number": 2079 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "07129d0144229ea2f909e58e6c5bd2816144e349", + "is_verified": false, + "line_number": 2082 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "37f00f3e2203487e83fa90426bd8f8245cc96c39", + "is_verified": false, + "line_number": 2085 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4b14989c7328ad3949ab46c13655b0571d524c8f", + "is_verified": false, + "line_number": 2089 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "99162dfa756ea3aa3b3ca46cb5bafb7be08363c9", + "is_verified": false, + "line_number": 2093 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0271ba23ffedb48366d081c6b6067ad2d4fe18ba", + "is_verified": false, + "line_number": 2097 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9d2178d94184ac4ad28f5c8ef6fa97d93ca3645c", + "is_verified": false, + "line_number": 2101 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f2c188d666cdf29baade740567f65df61ea1cf2e", + "is_verified": false, + "line_number": 2105 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "acce4ef8d841ffa646256da3af7b79ad5cb78158", + "is_verified": false, + "line_number": 2109 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"1675daf3c3520cb15afb00c2cd618127c366f79e", + "is_verified": false, + "line_number": 2112 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7b56b5179117dccaee7b998a898521af941e7c42", + "is_verified": false, + "line_number": 2117 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c0f64a532897bbd5497996fdfec7cfcd087540ce", + "is_verified": false, + "line_number": 2121 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5112b43ee9ebd4cc5f505a8cd35d1e53f691f19d", + "is_verified": false, + "line_number": 2124 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b58ece1a23c6b414412378abceafd8549e609789", + "is_verified": false, + "line_number": 2127 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fea0d9c5b0c53c41e6a0a961a49cccc170847120", + "is_verified": false, + "line_number": 2131 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "283a181bd9971789ceaee59c32a0f93e31a7a2a8", + "is_verified": false, + "line_number": 2134 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a12ae282ce5c351ca57ebcbeb36ef4d6d56aa22e", + "is_verified": false, + "line_number": 2137 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "383566eee3c76b6e18064f3ebd645b26bbbfa121", + "is_verified": false, + "line_number": 2140 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dd69032e5f6d48933ecf00c4f236265686cf24b7", + "is_verified": false, + "line_number": 2143 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "16fa3bfaeef78e32910c87baf9060e51490d09ad", + "is_verified": false, + "line_number": 2146 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6e26c94da87e7607f956659ceae46d9d472dfaae", + "is_verified": false, + "line_number": 2155 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c5b9224d6550ddd6a9d3e700fd0a58360b11ca0d", + "is_verified": false, + "line_number": 2159 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7c235e480345a8563b9b7348d8b568bd51bf3814", + "is_verified": false, + "line_number": 2163 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ebe2160ede628e0faeac9fe70c215cd38d28d8f6", + "is_verified": false, + "line_number": 2167 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5a3e3e62659aa995f601a8a4eb9beb2525965dab", + "is_verified": false, + "line_number": 2171 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "135419fcccccb29d15b1bc525bb70a2d3808866a", + "is_verified": false, + "line_number": 2175 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0987509f1737d3768558ac71de7b481f4170f957", + "is_verified": false, + "line_number": 2178 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3594781b38f5d70bb1b747317347cf06b2a0cc01", + "is_verified": false, + "line_number": 2182 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "676ada89dea8b27c7aec35c6ef64b92468725789", + "is_verified": false, + "line_number": 2186 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "76332214739a4c7dd7c65873aa473b0b918d47e4", + "is_verified": false, + "line_number": 2190 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "935965ba7f384931bf702cbcecb35d13aadfa81a", + "is_verified": false, + "line_number": 2194 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "40e7076fc8fd248af67cfc317346eafeafa8f7a9", + "is_verified": false, + "line_number": 2197 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b22e9c725f4db08d46d260b8913feda1264acfc3", + "is_verified": false, + "line_number": 2206 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3aa0faf721de27687f7be4cc9ed4e61c745e6e44", + "is_verified": false, + "line_number": 2210 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c380f9202db66f599829889237fe718a66f7633b", + "is_verified": false, + "line_number": 2214 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8880036dd8f74c6cd85075c896ff8851627a0919", + "is_verified": false, + "line_number": 2217 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "58ca0ef615f18c001be473ebab47d6424f35a983", + "is_verified": false, + "line_number": 2221 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0fd202e9404f1d583e19e165ef069ee2c4939f50", + "is_verified": false, + "line_number": 2225 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1a2af2118ab8a4aa284c0f0cbe49b069ec8b38eb", + "is_verified": false, + "line_number": 2229 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "373831427f7199408d11295085894b997d8537cf", + "is_verified": false, + "line_number": 2233 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d950cd8ef510429f57fb282e4a437321ca86158c", + "is_verified": false, + "line_number": 2236 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e7f54899a1654525fbfb1d8c3ac44d632f655af1", + "is_verified": false, + "line_number": 2241 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "75c328564324e574f625b27b7cbeb722f3671a26", + "is_verified": false, + "line_number": 2244 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "984398efdb0e750e872686ca087b832568a6721c", + "is_verified": false, + "line_number": 2248 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f5e09813aa79c07abdd4edd0469c3fad0a4ec10d", + "is_verified": false, + "line_number": 2251 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "184476d985c0d799b82544165cb4d456129f594c", + "is_verified": false, + "line_number": 2255 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e9909ba7ebcf5a8ad10230f3027aa80bea274b72", + "is_verified": false, + 
"line_number": 2259 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4cd3a1b109eb67acf1557141e83880899d18eaea", + "is_verified": false, + "line_number": 2263 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "af5908e182714b9cf3e23d94666ad6aee26514a9", + "is_verified": false, + "line_number": 2267 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5a4d96497ba98531c9c36ced1d043fbd8c535eff", + "is_verified": false, + "line_number": 2271 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "aea8f7ae787ad25f04c232d2f01917f4ff7d154c", + "is_verified": false, + "line_number": 2275 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7e82d05de917a3ee897720bbe1f05f202b9b8226", + "is_verified": false, + "line_number": 2279 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "12d7fe8a858191bcb754bf370fa775a409eac3c9", + "is_verified": false, + "line_number": 2283 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "69de35c4b433e496d6537c8e606ebf02d87ab600", + "is_verified": false, + "line_number": 2287 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fd98b96850e508fc651300d225095d4ff5a274f1", + "is_verified": false, + "line_number": 2291 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "371ca38e7acde2cb9e7c1a8bea0355722ff047cf", + "is_verified": false, + "line_number": 2294 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "89345a033a4ecac54d56210d7fb9533fddc58490", + "is_verified": false, + "line_number": 2298 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ccf610151dd8c560eb61b9ef96d18395c2f89499", + "is_verified": false, + "line_number": 2302 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cb1eeac4fc1ef91c10d827c58904d458c8062d3d", + "is_verified": false, + "line_number": 2306 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c82d951d495ea37952ebb0f3fe072f69f8b0f685", + "is_verified": false, + "line_number": 2311 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d48d8ecd6e7cb00b02cfc3323c06bf5edd51d853", + "is_verified": false, + "line_number": 2315 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "95631761acb6ffe366cd95710b517dabfd6537e6", + "is_verified": false, + "line_number": 2319 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "766d97a9c49ed6b30ccfad20efab33366a616e8c", + "is_verified": false, + "line_number": 2323 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "043f592ee4062fbb234261a5cb68546115ae8485", + "is_verified": false, + "line_number": 2327 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4537218d8d617f282539b0ac3aa7eccfd29270b2", + "is_verified": false, + "line_number": 2331 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"8001c0a43c08fe062fd978adfe8f0c735f09fa57", + "is_verified": false, + "line_number": 2335 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7998a3bf02db9121b7c800634814515a3ac87f67", + "is_verified": false, + "line_number": 2339 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "979e2a14e4576c62a9748d7466bfa7f4f3a0620d", + "is_verified": false, + "line_number": 2343 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d9b102378e12825849cbd59f222d9439965ec257", + "is_verified": false, + "line_number": 2347 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1e074256695d27dc0bea46d8da0b7d078ac360b9", + "is_verified": false, + "line_number": 2351 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f71df02c4e0a94ca1e2fdf91420a5279930d3e34", + "is_verified": false, + "line_number": 2354 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f750905460fbb9966ef1af61a3d005f9e7b7a19c", + "is_verified": false, + "line_number": 2357 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ff3f02370ab5ef742f4f7ac1f98a572f43a4a93b", + "is_verified": false, + "line_number": 2361 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f4083873ce7d6c93209c11e557c2835aad31e885", + "is_verified": false, + "line_number": 2365 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "301ae993b81b30029f870298b4637005d7133b78", + "is_verified": false, + "line_number": 2369 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "086b47ff4b09d5daf48fc11a12842df114051a64", + "is_verified": false, + "line_number": 2373 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "71b8927fbe4bf203ff52222400dd33ee89aa77af", + "is_verified": false, + "line_number": 2377 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4ee70eebd28c286b11422957e7546ae66f041791", + "is_verified": false, + "line_number": 2380 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0d97c7fe1c124e07eb8bba42a86419c083ba0591", + "is_verified": false, + "line_number": 2384 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e07f2c68e0fe6abfac19da4e9dd0c81669de7078", + "is_verified": false, + "line_number": 2388 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "57bb6e8b0ed18d9c233416a4ab22ccf9bb06abca", + "is_verified": false, + "line_number": 2392 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e50a4bfa9929ba1cc6544fdea3d2b710f1cca201", + "is_verified": false, + "line_number": 2396 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f52c08f63289414b8da2ea1271f68c9e1d6338ea", + "is_verified": false, + "line_number": 2400 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3dfa69b1560e1d81d179e88b0a988b04f05e0e36", + "is_verified": false, + "line_number": 2404 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d2306a21994eefb44d055e593477d8c194e2acab", + "is_verified": false, + "line_number": 2407 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bdbc2ef42d826c041d0c6c9dc7f678cca5a9967e", + "is_verified": false, + "line_number": 2411 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c5955cdf32ea48f6aea18e5802f565e708178d2a", + "is_verified": false, + "line_number": 2414 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1a4cedb8b43b758521f2b5fbfcbb78a5f9128ed0", + "is_verified": false, + "line_number": 2418 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bbc5944e53f4bc00cb7c83d4ffdf2f3e477e2ee5", + "is_verified": false, + "line_number": 2422 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "70a9b00efd81fb7508f8c8f40f83d1ffe4cea469", + "is_verified": false, + "line_number": 2425 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a85db4af9637720e23cce6f9da6605bef81c6d35", + "is_verified": false, + "line_number": 2428 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4130a05bbbc2c204dd1470cd2f5cc0894c8e6071", + "is_verified": false, + "line_number": 2432 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5bcb233a4cf6e79f65a75aa15e06169c85eabfc4", + "is_verified": false, + "line_number": 2436 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "998b28c4af6e2d277e31578adbf3af0c5eb658af", + "is_verified": false, + "line_number": 2440 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "935d0ebd64dba2fd3ec6ab430ddc9b692acef7d0", + "is_verified": false, + "line_number": 2444 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "95bbece0a62e7912ea58ca14b18a1579347949c7", + "is_verified": false, + "line_number": 2448 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9cfd1c9ff642dd3f9093c2e68fda7b9daba943ca", + "is_verified": false, + "line_number": 2453 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "82bb8edb4003308fc261a3cd2488be9862e99657", + "is_verified": false, + "line_number": 2457 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c6ee5cc9425c6e106fad752bf1f79925d0f8d332", + "is_verified": false, + "line_number": 2460 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f09b00d00e74c891c9bbc0614a19d47015e616c1", + "is_verified": false, + "line_number": 2464 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f1ac10cceb66eae06a957162ed1c39f18f017ee5", + "is_verified": false, + "line_number": 2468 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5fb0a24dd6f8435a5158be97597af9c7ccf84a6c", + "is_verified": false, + "line_number": 2472 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bfdca506eee44334854f8152738ba534bf01ef43", + "is_verified": false, + 
"line_number": 2476 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f668db2ce6001bba5403f0fa041d3a1d7d4dd608", + "is_verified": false, + "line_number": 2480 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e25be230742364f1ee60067a756da19dc7572126", + "is_verified": false, + "line_number": 2485 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1109544447e298af75035b2bbfc851610f963a65", + "is_verified": false, + "line_number": 2489 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "41e520378cd188868d9d2d6408c080f6fd5b37c9", + "is_verified": false, + "line_number": 2493 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cf09cb791688fe019284bfdc362abc41918645a5", + "is_verified": false, + "line_number": 2497 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3e0e69fc20064535d7eda17ef7ed449494f6f8bf", + "is_verified": false, + "line_number": 2500 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b395c25b20ca99c90718896b0c2a74f946c4de50", + "is_verified": false, + "line_number": 2503 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9eef72bb8169ee4de81344707e4a8257a917b3d7", + "is_verified": false, + "line_number": 2507 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "97a314e843d643884da31bac47721eb49f0b59c1", + "is_verified": false, + "line_number": 2511 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0af5f9ba961e97512657c04f4a545e17c6dd050e", + "is_verified": false, + "line_number": 2515 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "28b39ec24e3005969658899da8df00018ced10cf", + "is_verified": false, + "line_number": 2519 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "85ed1eba6b8c8ba942d225aeb6559a939a301adf", + "is_verified": false, + "line_number": 2522 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8b84e18123f74c16f846268dbf4572b48ec720e7", + "is_verified": false, + "line_number": 2526 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3c22fe71719555d5e4c508c724ee1c9a9728d8a1", + "is_verified": false, + "line_number": 2530 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "549b6fef4b036ed59ba6d106c33dcf4ff1dc6d55", + "is_verified": false, + "line_number": 2534 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bb2beaa7a8684460bbeeb734af599cfb5802456d", + "is_verified": false, + "line_number": 2538 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8bb78698191db0e79ec60926b8ab1b47dbe3f6b3", + "is_verified": false, + "line_number": 2542 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "86b823e63205b55fc703755bdb60b699ad07c39d", + "is_verified": false, + "line_number": 2546 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"4cae70da23458d9c8ccc5ee4a4b226ce21829764", + "is_verified": false, + "line_number": 2550 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "92e5cd71f092b9f940ac454720733cd8fdbec1ec", + "is_verified": false, + "line_number": 2554 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1fb86d8581e6e42366820b9b8a967ee10d022c97", + "is_verified": false, + "line_number": 2558 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e9ce08fb0922eb7b6fd8078a79f948726b9b756e", + "is_verified": false, + "line_number": 2563 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "652497ab269df3544512bcfa91cac0c800c97901", + "is_verified": false, + "line_number": 2567 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d5ec021cbb92c3486e38fac176f4f59a6c1d92e8", + "is_verified": false, + "line_number": 2571 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "afdfe35e9c990e98e81dd22888ae07f91e150f8a", + "is_verified": false, + "line_number": 2575 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "08a356a135545721dadf132169454b7b8b7cd539", + "is_verified": false, + "line_number": 2579 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2d9bb7f81fd01f857fe9dd8bba1b0136ef524c49", + "is_verified": false, + "line_number": 2583 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e21a14d57a329853b993d691357b120b69736f3a", + "is_verified": false, + "line_number": 2587 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0168f03d6f1748d5d7ae624a93f711333b69d01c", + "is_verified": false, + "line_number": 2591 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d0f0c6db66192b167a07a408f33ef8dffa02f4bb", + "is_verified": false, + "line_number": 2595 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0d5226442f570f38dec1493dc5ebffac9fa67087", + "is_verified": false, + "line_number": 2599 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a3e277c241e0aec1a1f48c64ca9e86674f18e924", + "is_verified": false, + "line_number": 2603 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9e3b67a4d390644c949cdfeb8164784b571b629c", + "is_verified": false, + "line_number": 2606 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f88f23dafd3c868018f752bc0f0b816603b8fdcf", + "is_verified": false, + "line_number": 2610 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6dd8bf6e832a76d158587fc0f98a8e5c8503a040", + "is_verified": false, + "line_number": 2613 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e8e209569a38536aef621777815165826c4a20dd", + "is_verified": false, + "line_number": 2617 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8f95bdf43361cffe6720264f402b5636239f6367", + "is_verified": false, + "line_number": 2621 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f89ed9527422d1b197cbc9a4515b18bf462823a3", + "is_verified": false, + "line_number": 2625 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "743483deb9ac785430eed76f508202830ad96a86", + "is_verified": false, + "line_number": 2629 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3ec192ccf5cf4a3aa4b0ffb5a54a48c8dddb60b4", + "is_verified": false, + "line_number": 2633 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e907e408e8ac716a2dc03c3a56283b57e5bb5c73", + "is_verified": false, + "line_number": 2637 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0e32e02a9b0b0de21ec927960789a386eec96fa9", + "is_verified": false, + "line_number": 2640 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "49f6e9d43cadb9b9c956ec2fd3722009a8222abb", + "is_verified": false, + "line_number": 2643 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "49df54e3acc7b6544d323d975ae969cfe17df133", + "is_verified": false, + "line_number": 2647 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "84c32a435812055f5f1ea4b4bbfe917422473e41", + "is_verified": false, + "line_number": 2651 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7e775297f58b69533534e9a520c7b8b5bde51a0f", + "is_verified": false, + "line_number": 2655 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "16d2f8a621dffabdf92f7dd8d33dabbe18f43ce8", + "is_verified": false, + "line_number": 2659 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2f78d49313c2bd0c91770e130bd4a06a3fc52392", + "is_verified": false, + "line_number": 2663 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "637e2e699ad406eaa342f5f29b4c4e9c59972ba8", + "is_verified": false, + "line_number": 2667 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "eb58f2a940741006a591b5de367d057f9239dde2", + "is_verified": false, + "line_number": 2671 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "339a5454990b92ebcdb27d46167833daac75e019", + "is_verified": false, + "line_number": 2674 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "629b000517fc951844761ad97b4ca53ea067f43c", + "is_verified": false, + "line_number": 2678 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ffcba46ed98546e8d0c9edbf9dfd2f80fcccb869", + "is_verified": false, + "line_number": 2682 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "46c26fc3315da3e9245eff2cc6163a3c27fbc9ab", + "is_verified": false, + "line_number": 2686 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "02caf1c0c7c1e839b9b3b85fa41701a1d38c679b", + "is_verified": false, + "line_number": 2690 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9be9a82a558bf3c167eef2646c53f92a7a726125", + "is_verified": false, + 
"line_number": 2694 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a0d12daa780297a6dfe6c16bdcc4b09b081ff062", + "is_verified": false, + "line_number": 2698 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "06db8e9d45e51f5be654b372301a579ea929b6b6", + "is_verified": false, + "line_number": 2701 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4e5e279b264f80f6302507f3bc02028118a5381f", + "is_verified": false, + "line_number": 2704 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "54ad4dacd7c5069f1e674dd8b7d1c2a78e405d06", + "is_verified": false, + "line_number": 2707 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e53d61332d1bf1be17d33eb4955ecd7fbed6bf02", + "is_verified": false, + "line_number": 2710 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "37f829b714b81fa242bfeb76fab61cc87effed86", + "is_verified": false, + "line_number": 2714 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4dfc67bc966e2494008a5f5e346fca119afccb91", + "is_verified": false, + "line_number": 2718 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3bff8900ed11d9495504774a20bd23285d6e9978", + "is_verified": false, + "line_number": 2722 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1633ee6a9a0c91bca3ca3691bc73488dab661b3a", + "is_verified": false, + "line_number": 2726 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "326f9b905982e34b66b74f51c1fd6eaa3ffe16a0", + "is_verified": false, + "line_number": 2730 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f8e1856295ecbf3d0c5b858e3d6fb63209e97f03", + "is_verified": false, + "line_number": 2734 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ac1e2c1049f07d2500571a54bad02f89063caf37", + "is_verified": false, + "line_number": 2737 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "11fd9b5b52ef9c3424f08cf39b03b96645987845", + "is_verified": false, + "line_number": 2741 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "16edfa4f7cc277c609b9bd8f8a26bc0cfc9898f5", + "is_verified": false, + "line_number": 2745 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "89bcae514ef16e47ab40a3e8c0eb5587a675bb12", + "is_verified": false, + "line_number": 2749 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "52b2517fe23fe840717fbb04ee8011460630bc6e", + "is_verified": false, + "line_number": 2759 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a415cfe76893c6c90bbada031d57aeb5fe3eb0cc", + "is_verified": false, + "line_number": 2774 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ac648e245e07d284aa43a42fa954db62cf1609f8", + "is_verified": false, + "line_number": 2778 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"07908f42ea25858dbc7f3f183365beaddaed7a27", + "is_verified": false, + "line_number": 2782 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8d44a45c64dfa043b8fe261e67a5e6f173380aa8", + "is_verified": false, + "line_number": 2786 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d483978ba35a046d8ce3ade5aabb1c0f0397d760", + "is_verified": false, + "line_number": 2790 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1985e44ec7a05f90a9a18a1161bc57a27b569644", + "is_verified": false, + "line_number": 2799 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "193c5daa9dcae02696a07197e1dbd70e202404f3", + "is_verified": false, + "line_number": 2803 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "732973af6062e55cb10d34e51f3c4d7d55071329", + "is_verified": false, + "line_number": 2809 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "15597c55837f6d5e2626237c5a61b3626153bc22", + "is_verified": false, + "line_number": 2813 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f998147814ae80b00f51219a21e48760afc8ef5e", + "is_verified": false, + "line_number": 2817 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "31fff47a74361458f8a85732ed8296e468b1738d", + "is_verified": false, + "line_number": 2821 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7741e32244d8b247bb3c8176701a4d34613571c4", + "is_verified": false, + "line_number": 2825 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f6209423912e67cdea012d57dd1e092f572cb9fb", + "is_verified": false, + "line_number": 2829 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "27b02ebe9968e8bf56485bf9fb773360771db656", + "is_verified": false, + "line_number": 2833 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "039aac9bd1402c3003a42f4faa5cc2c40609420f", + "is_verified": false, + "line_number": 2842 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "082460674bd63d8d5582b9dbf1f7e05b72d6bc46", + "is_verified": false, + "line_number": 2846 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3cc02f188401e61ebf230936e4349939883e5805", + "is_verified": false, + "line_number": 2850 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9fccafe5636dbbbe4731f7748236d657005a89d7", + "is_verified": false, + "line_number": 2854 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "df836df7687981a66d37e68336bfc2260043658f", + "is_verified": false, + "line_number": 2858 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "59b34e11013710e06e286e01d70c7ba3ba58a33e", + "is_verified": false, + "line_number": 2862 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "da882a02c093304700b53821f44482efc6d3d89c", + "is_verified": false, + "line_number": 2866 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4c0b05f9fcbaa13b2d650ebcd1d4745dcaaba284", + "is_verified": false, + "line_number": 2870 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3afdc7b2e5de49ff23ba6286e9b9eb1f6f8695e9", + "is_verified": false, + "line_number": 2874 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2dbc87b65df21e0f32dee947d0d45bd0e50fc411", + "is_verified": false, + "line_number": 2878 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b0d8924e434200d687a6f950fba7b78e44513d4f", + "is_verified": false, + "line_number": 2882 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "81945c57be5a43a4f3fa43916af1062df748552b", + "is_verified": false, + "line_number": 2886 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fe98b5e637d12318a327588377c46efd48d640cf", + "is_verified": false, + "line_number": 2896 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5856bd4cb3a23981807d6e537408344c13ffaada", + "is_verified": false, + "line_number": 2899 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fcb18ac405c4e422fe4b38c25a8f936a08da7223", + "is_verified": false, + "line_number": 2902 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "30234f92f50b1a55d8195d524fa8482302808e0d", + "is_verified": false, + "line_number": 2906 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "187380c9d6a767418ba5ab908ef80f0f509429e1", + "is_verified": false, + "line_number": 2910 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bbe19cbdee81cbfbd8bcebc265ce854b5c138e5f", + "is_verified": false, + "line_number": 2913 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3773d33c5207f8939a7a42e355daa3e28619cab1", + "is_verified": false, + "line_number": 2922 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b86a40402f1d7f696b1182d11f6ec1275abae433", + "is_verified": false, + "line_number": 2927 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "32f166e925f96dafcba74b3787246df2a06a9e19", + "is_verified": false, + "line_number": 2930 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a38b32005305ff20174b03ddb42d76d3ef9cd7cb", + "is_verified": false, + "line_number": 2933 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "127f92724797904fb4e6de2dfff2c71c07739612", + "is_verified": false, + "line_number": 2936 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e7c4ee17ab711302f98d915dd0b266cd2d1063e5", + "is_verified": false, + "line_number": 2939 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", + "is_verified": false, + "line_number": 2942 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "518c6e09c048b1b3cdbcc4ed47df84e0552aba4c", + "is_verified": false, + 
"line_number": 2945 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2d6eff8bdc8c458731e9a8773a60ef0c21ab32a1", + "is_verified": false, + "line_number": 2948 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "88d5de50147c0a55cf86a9c79b0002573f21febe", + "is_verified": false, + "line_number": 2951 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4ad6c7f8512a5eff9b7dbe3e51f27d1c5bf325f7", + "is_verified": false, + "line_number": 2956 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3274795deb291199e248f6c3ca531235569dc2ae", + "is_verified": false, + "line_number": 2959 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7cb7b57abcde8de51289f0e20d925d3c392c764c", + "is_verified": false, + "line_number": 2962 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2007bbacc35b6deebbdfbd75b9121a24378c2e39", + "is_verified": false, + "line_number": 2965 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b3a3fb7848d2d45536b9cfcff2d2f158fcea41d5", + "is_verified": false, + "line_number": 2969 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "061441420ff244c9caf340349c739b8158b3a1ef", + "is_verified": false, + "line_number": 2973 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4c565e2b5132b7df2d3126654bee51cf449f5d8f", + "is_verified": false, + "line_number": 2976 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "482661ab1a2dd1c3df1ac35182dea446441301d2", + "is_verified": false, + "line_number": 2979 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c67a4f462b254f45e1c4e55fc2a1248e0cfe8129", + "is_verified": false, + "line_number": 2983 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "16f7e60bfc60eb1530e738c27e95b7f5cb1bc6a6", + "is_verified": false, + "line_number": 2987 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "72308287f8c0e080820be69d27cd9810b9cd9de7", + "is_verified": false, + "line_number": 2991 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0ec89f07004a01dda54b29e48a86c84f8098fdb6", + "is_verified": false, + "line_number": 2995 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5ca14e34f1ca97b3207356816b44c42445f02180", + "is_verified": false, + "line_number": 2999 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1e94f147d0d24999498d3527047accfcd7cd2402", + "is_verified": false, + "line_number": 3003 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c892956af4178b43803ed37bb616843f31de4098", + "is_verified": false, + "line_number": 3006 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "239f82480195e8e6d05a2a7182bea061fe88fc2f", + "is_verified": false, + "line_number": 3011 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"511df6eab11dd9cb7ee87201e81e0cb3504efd86", + "is_verified": false, + "line_number": 3020 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e24a8cec071d865526b863f7dc34bd49679f7a29", + "is_verified": false, + "line_number": 3024 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1c386996454f57fad765143b1a1393b4fd15dceb", + "is_verified": false, + "line_number": 3028 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f30db3965cfaf45c216038e7ce454d23be9c08b4", + "is_verified": false, + "line_number": 3032 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "252175e036354d66ecc9592b558e55f81f876205", + "is_verified": false, + "line_number": 3035 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", + "is_verified": false, + "line_number": 3038 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6a0e4db763e3a9068152154d0ca5185d412bb775", + "is_verified": false, + "line_number": 3041 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "23c9c353abb8cfbd3b807c61885b19a64831a5a4", + "is_verified": false, + "line_number": 3045 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0c5af2fb426332112bb114018a4e0399f8ca0b87", + "is_verified": false, + "line_number": 3049 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fb9c7a703400fb200bda1edab8099ecde48d9af7", + "is_verified": false, + "line_number": 3053 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8bcb0724f3fbf4fe7b5662792050ad8c11d7e90e", + "is_verified": false, + "line_number": 3057 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "15931787d058bf9aee0a37f10bd073d262aa1225", + "is_verified": false, + "line_number": 3061 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "87aff3c6c2e4719c7c068c625dc5c6bb833ab411", + "is_verified": false, + "line_number": 3065 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "77c11d431ac834ce61a98eb450bc0db9f058b8d7", + "is_verified": false, + "line_number": 3068 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7ed5906c17ddd4e8330c8475dac722175e5e67c4", + "is_verified": false, + "line_number": 3072 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d7cdb9c755d36ef6bba01ff385c215194d07c523", + "is_verified": false, + "line_number": 3075 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7aa924d4aef9f14c78acee8a31bcbd78b26e9898", + "is_verified": false, + "line_number": 3079 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "96fa5c5ee2b8bcfc05130216d4f0168043c2239d", + "is_verified": false, + "line_number": 3083 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "31155ebdbc6a4b78c7a2146ce65263d30c45a9d5", + "is_verified": false, + "line_number": 3086 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bed2be7dcefaad482d5b1215bb9c485bc1904f76", + "is_verified": false, + "line_number": 3090 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d8e88da9e8f3e4fc17769c89a3779ec33b8fe4e4", + "is_verified": false, + "line_number": 3094 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fae8621d36509c7e508efc2cf8ecbfe28e79922a", + "is_verified": false, + "line_number": 3097 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d93832bc05a2154fcfa5468ce2ff32b41c52b03d", + "is_verified": false, + "line_number": 3101 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "87ae5db459dbd9fe4157473b016ae5ff5ef0f4ed", + "is_verified": false, + "line_number": 3105 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "60fc5bc215dd61c2c3fcbf28759972f33748cab3", + "is_verified": false, + "line_number": 3109 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2cf8bb850e018731465613b416d9603758dce6bb", + "is_verified": false, + "line_number": 3113 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "73f99321c1d31c19b059d58bf503320edb5ca0b8", + "is_verified": false, + "line_number": 3116 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "55028677ab6a65c8188c71b38c7a5ec4cfdb64f0", + "is_verified": false, + "line_number": 3120 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "81a165cc6dfc1c5b2a498fefdd6a0774232063b9", + "is_verified": false, + "line_number": 3123 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "838abb1331a160bdd441c25a2a2d44332ee8694b", + "is_verified": false, + "line_number": 3127 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e6525dbd438fc8858b0d56c59d164f403792ba1e", + "is_verified": false, + "line_number": 3131 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0913ac5bef0b5ee1e0619deee04bed578b2cb11a", + "is_verified": false, + "line_number": 3135 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "abb99044ff3c205878969cf6238197f65ebb1193", + "is_verified": false, + "line_number": 3139 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f3d0ff4509b5f0720a405308706748c59a350c8c", + "is_verified": false, + "line_number": 3143 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "06c654850ce93559d42fd5f657cbee6c4d9b47c5", + "is_verified": false, + "line_number": 3147 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "21896bc4745de6db3863cec7f9a6568b7337ce1f", + "is_verified": false, + "line_number": 3151 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b701486290777985572bdf83321be59eee7cd0c6", + "is_verified": false, + "line_number": 3155 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b230837c24f19045e3d6c0eb0413db8650788a23", + "is_verified": false, + 
"line_number": 3158 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6305ff4a4dd1bf9bbeb5bf8dfd029ef5d0672468", + "is_verified": false, + "line_number": 3162 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8ac8e759308be486fa9eb1fcadf99defd39eab96", + "is_verified": false, + "line_number": 3166 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0430ef838f88b8c253f36d7c2c5e9da6b30310b7", + "is_verified": false, + "line_number": 3169 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "aa2add0e083112de14c80fd8d81ec606de67ee43", + "is_verified": false, + "line_number": 3173 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "612b70ad4343adbb09a9cacc2fd28d7a2eae9313", + "is_verified": false, + "line_number": 3177 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "805d9656080308c11cde7cb7f0310e12c7e60c48", + "is_verified": false, + "line_number": 3180 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a581de441017c3dc49bba8717a9cdc7b136c924a", + "is_verified": false, + "line_number": 3183 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "84aa7d25b60745899c3d0ba6d1f9c6ad0808e39a", + "is_verified": false, + "line_number": 3193 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "03161c32559f722405cea8c58f19d3610b03ee6f", + "is_verified": false, + "line_number": 3197 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ca13874532c9df50a570a36576667e2bbd4383bc", + "is_verified": false, + "line_number": 3201 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6739c73e97c0a1fb8bd884e5f4b42243c02d9b50", + "is_verified": false, + "line_number": 3206 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "508ffd78a7522f83c630d98474d58ccf406bd25e", + "is_verified": false, + "line_number": 3211 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "db24a96822d27d963dfd24104c32896bea3378e7", + "is_verified": false, + "line_number": 3214 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a8e01696f1bffd9448a0f34e2f67ec0c5844c604", + "is_verified": false, + "line_number": 3217 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6fef1483a4a83bed140cea3e97e3f6923b914e1f", + "is_verified": false, + "line_number": 3220 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0a0edb4887f27327c55bced79962ddab83d9ec1b", + "is_verified": false, + "line_number": 3223 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0a68e003e521d8b996e1c07b3b5b2664de31fa44", + "is_verified": false, + "line_number": 3227 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a9c69bd590b69369612c15d26acb4eb2955587a0", + "is_verified": false, + "line_number": 3231 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"97d77287e5c1c07c7f96185e164752eca91c10dc", + "is_verified": false, + "line_number": 3235 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "eca834fda8bcec48ff3448478d81a28b3264c401", + "is_verified": false, + "line_number": 3239 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bfa89e1607d2248db3523f241a0e9eb543cd0061", + "is_verified": false, + "line_number": 3242 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0817713c3585298fbb156482ffc6787018397167", + "is_verified": false, + "line_number": 3246 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "563cc740e3f8f3749478a07df90ec463fd703f82", + "is_verified": false, + "line_number": 3250 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "33e69b95839a030ace837c4048fe6758812edff2", + "is_verified": false, + "line_number": 3254 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8a831faa7741769cecdace6061af11bace90bcbf", + "is_verified": false, + "line_number": 3258 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a24b8cbc839a09cc0902bfb2576ae319a99a2385", + "is_verified": false, + "line_number": 3262 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "af6aebec89c4733b2ad35a16cd217fa649fc92bb", + "is_verified": false, + "line_number": 3266 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "91b0e0a1a2302bf2818d38bb308c1fe5d8dce7bb", + "is_verified": false, + "line_number": 3270 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "38c97b06ca380ec6b537fb959634cd6042e47cea", + "is_verified": false, + "line_number": 3274 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "064b874f13a3779d7227de426e2646eaaf7d1b39", + "is_verified": false, + "line_number": 3277 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f919cefce72278c96d5c9618afcd94552f6e6721", + "is_verified": false, + "line_number": 3281 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "87163ce6922ea47e72894ac3ea15bbbc181d7db8", + "is_verified": false, + "line_number": 3285 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b5a6f2a2101ff7f7af8d0c6c87c554acea27e0c9", + "is_verified": false, + "line_number": 3289 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "15a6f37195a5fe8a16872f13a514f2a6773c9f71", + "is_verified": false, + "line_number": 3293 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7356a0be2ffe123fd4b708a6d09ecdddaa038c22", + "is_verified": false, + "line_number": 3296 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0ed87c364475a9a1c3b8cfec235e72110cbc7dbe", + "is_verified": false, + "line_number": 3299 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "12d5ef9f69a56f09c87d65bce419ee256f1f24f7", + "is_verified": false, + "line_number": 3303 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "95a1f1449148121ac6b0cdc457dc80b86eb16ef7", + "is_verified": false, + "line_number": 3307 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d69c03a6c481565a321e7b90776918eaad16e532", + "is_verified": false, + "line_number": 3311 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b1655f0436ed02542ddf34e65e9897e59633b752", + "is_verified": false, + "line_number": 3315 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "316bf5ed09ed2d2511c54a3e60e047e26550c82c", + "is_verified": false, + "line_number": 3319 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5f1e4c4da24a321d56ae69c47446c44b3844c00b", + "is_verified": false, + "line_number": 3323 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c9a0f70271023a1b82d44b6544c32f834098c007", + "is_verified": false, + "line_number": 3327 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "55159cc007aeaf5fbf0746c70b85d9ddc16db6dd", + "is_verified": false, + "line_number": 3331 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d9cef10e2bf459ee9e56a25f81323a72c4e6a044", + "is_verified": false, + "line_number": 3334 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "139323d71483de0860981200fb39673681ab2ef2", + "is_verified": false, + "line_number": 3338 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d9efbdf71c548a000ac469ddb800a566a21d9824", + "is_verified": false, + "line_number": 3342 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cd1098ca8351dfe5f82c5ab55afdd90a70e6f496", + "is_verified": false, + "line_number": 3346 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d25f38e564eeda6402f558b3ad020bd05220df32", + "is_verified": false, + "line_number": 3350 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ae745d719f97b3ddb9791348b1f29ff8208c0c5c", + "is_verified": false, + "line_number": 3353 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "533573708ed3eabce514432cd26d532218222659", + "is_verified": false, + "line_number": 3357 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1b6d7df51b5d3b60d36200961a9cc1885619e96b", + "is_verified": false, + "line_number": 3361 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "61ff5c8a702d8ffe28063c06487a3a5a0db2ed70", + "is_verified": false, + "line_number": 3365 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e159739ed2c59133b3d3012a0a2e1aa4dab506c8", + "is_verified": false, + "line_number": 3369 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6942521514db424f66a46a086146c8d65db76a9c", + "is_verified": false, + "line_number": 3372 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2521a6a9f229a3c36588d8c48dcfbdfdbcdf894e", + "is_verified": false, + 
"line_number": 3376 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e1beff0b39929c9538a683456356f4b7da203723", + "is_verified": false, + "line_number": 3380 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b3535a5defc70dbd440e104c8ed5aac37b8a6ca3", + "is_verified": false, + "line_number": 3383 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7e099865926cce2fe8adeb2eafe9bbae086e80b9", + "is_verified": false, + "line_number": 3387 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cb6a34af05ec6dd557e12fa9adad98c133f4ea6c", + "is_verified": false, + "line_number": 3390 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4439cc784aca22e5cdf33ed6656f5b8b99eaa44d", + "is_verified": false, + "line_number": 3393 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1ac881fcbb09fad5c8b422aa517c10f8aae9c8b0", + "is_verified": false, + "line_number": 3396 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "06477f13a769300ebac81ebe76739fb78432f564", + "is_verified": false, + "line_number": 3400 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8e9601b5560486eb9c42180660134bf5b3892986", + "is_verified": false, + "line_number": 3404 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "df98ab59d267c0f604757eb30efd5f4b1e6503d4", + "is_verified": false, + "line_number": 3409 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dabcefa910bca0b7a0a6e2ee5b8ba0afef580da4", + "is_verified": false, + "line_number": 3413 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a333e405c05cedb7d37ba5a38ad44f4d236fe04c", + "is_verified": false, + "line_number": 3417 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "217c18ff93ebbb4d315133c781f54f6ce0649cf7", + "is_verified": false, + "line_number": 3421 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ec9c40d4e13c33cc95a8d66490b87a6a24a789a4", + "is_verified": false, + "line_number": 3425 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "724656ceda14f595f858e0d40732b90914e04225", + "is_verified": false, + "line_number": 3429 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bf74a1e32c8e698224b6cc118bc38bb3c8ece59c", + "is_verified": false, + "line_number": 3435 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "157e5b0c4ac544ad1a995d6414e4dd539af39aa3", + "is_verified": false, + "line_number": 3447 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d0904af82a2a9cd58801135a07d80410115d374d", + "is_verified": false, + "line_number": 3450 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "740c72e48345f65f0deefba812581017bd57c614", + "is_verified": false, + "line_number": 3454 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"c5c73e1be2887b3c0da541972ac48a15e5a79140", + "is_verified": false, + "line_number": 3458 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4a0a48c8725c1778eaef7ffcd9d14789a5b22263", + "is_verified": false, + "line_number": 3462 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e5b68dc212c35983909130bf6b5106e3a462ecbc", + "is_verified": false, + "line_number": 3466 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "84e94c466bc1e79bf5fa3ca0f91ad69a142e2b53", + "is_verified": false, + "line_number": 3471 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "21ce40fa7b942491db7f5babb19538e726c27e8d", + "is_verified": false, + "line_number": 3475 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1369198bf5cee9fcda4755ed3e7c041a4b9b04a5", + "is_verified": false, + "line_number": 3479 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7182ae6d1f2923a53c51ce219e0ef19de4656cf3", + "is_verified": false, + "line_number": 3483 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4f4dde0efe7acd336975cd25fc68e25347fd64ee", + "is_verified": false, + "line_number": 3487 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a44086439a402b1cfa18ba35abf41235c616d5a5", + "is_verified": false, + "line_number": 3491 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2fe871c9cf1a3c31f3ccc4756fa9e724fcf04ab8", + "is_verified": false, + "line_number": 3494 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "60edf9357ab126e077a488706704a2f658e19087", + "is_verified": false, + "line_number": 3497 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a87eb3f9bf9f1b7a5be91d498b1c821e3c9f094c", + "is_verified": false, + "line_number": 3500 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "706884687450108cef742ef7b12010f29167a501", + "is_verified": false, + "line_number": 3503 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b7a99d6a1f11bae3d2af655b736be489ed1c99e1", + "is_verified": false, + "line_number": 3507 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7410fa0f9ec078ee65600cf06cfe5f40899a43f2", + "is_verified": false, + "line_number": 3511 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "200d9ce3729d2bb7727c85eb3c53d4ec9f215438", + "is_verified": false, + "line_number": 3514 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6c8698ec180cb58d9c2baa683e3a57c1eb18e2f3", + "is_verified": false, + "line_number": 3518 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "47a29d935ba8e7949f48b6e8026399911abc1b56", + "is_verified": false, + "line_number": 3521 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a215faa77ef3f0cca4574804353e1df8298bdd49", + "is_verified": false, + "line_number": 3525 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "773059609b4366873bd8442db06662c2e66dafff", + "is_verified": false, + "line_number": 3528 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "02d385969dfaa27ab9ad385abdcdc4780680889f", + "is_verified": false, + "line_number": 3532 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2b4fcd00bc9e2b4aeb9e0675196f5058318a5a5e", + "is_verified": false, + "line_number": 3537 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ef92e3e90926767f04ea05b4fc58e71019cf68d5", + "is_verified": false, + "line_number": 3540 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5a233e343e439f5c0b7dad5b0bb3aac587600408", + "is_verified": false, + "line_number": 3543 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "52b74d992fac7ef134def804e108cc2ab8eaf8b9", + "is_verified": false, + "line_number": 3546 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "68fc429cdef15b95e7216be802013b2add02f744", + "is_verified": false, + "line_number": 3550 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d487e804638557c87781416ce87296299cef1dc2", + "is_verified": false, + "line_number": 3553 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "080ec1636d10139e8aabf248dbacf3b0ff038837", + "is_verified": false, + "line_number": 3557 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9dff9e9fd2f6eeeb815eb9db94eff6248259ba16", + "is_verified": false, + "line_number": 3561 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8eb603f181d16409922883159c02899b089f6d60", + "is_verified": false, + "line_number": 3565 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1086c24dddddcd90c93ec844e6a35274ed793787", + "is_verified": false, + "line_number": 3569 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9f58bcbdd9a12a44804c013df58cf21b1cc0c0d7", + "is_verified": false, + "line_number": 3573 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5a2e265c9ccb57c255a31362c32932df0701b72c", + "is_verified": false, + "line_number": 3577 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cd4f1724b6ec023c8ac16a21d8106fbe3bfac9d6", + "is_verified": false, + "line_number": 3581 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1c26b16e18acf30431dca9dfb0c8c938f6efb417", + "is_verified": false, + "line_number": 3585 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3c48660b66db6687c2ba1b7ffbac279614eead6e", + "is_verified": false, + "line_number": 3588 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5a485a4915f94231f9dc5b355ddda367442ba450", + "is_verified": false, + "line_number": 3592 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "87311dfdbf66115926199b1b8860a64d6ecd82f8", + "is_verified": false, + 
"line_number": 3596 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c5bb4d1bec6d3d483a2352b09c06190f2acc712a", + "is_verified": false, + "line_number": 3600 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "41135cd24fce09855f67710faed09efce38cd9fb", + "is_verified": false, + "line_number": 3604 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f241e44dd1421cd26ac78332db57217a61f50c94", + "is_verified": false, + "line_number": 3608 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f72dad6ae2b24fb3760f4f621858dca3ff565e36", + "is_verified": false, + "line_number": 3612 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "77545cc0538829cf48670f1777c19117acb2a3b6", + "is_verified": false, + "line_number": 3617 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "23a90c5e70b7d5eb28dc471b400c7490e2583af2", + "is_verified": false, + "line_number": 3621 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b994ef9a1ac4c3d06d3c3d11c1e876bdac49bc79", + "is_verified": false, + "line_number": 3624 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bfb59a5ae276713612fc79718254aab71db0463d", + "is_verified": false, + "line_number": 3628 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "19a998b1bf7031d19515cd35f049a7a6aa7dd1b3", + "is_verified": false, + "line_number": 3632 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ea4d705920dea9e9007f7ffd977007d97d93720a", + "is_verified": false, + "line_number": 3636 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fbf0d229b95ebbc8957fcd166fff38a3c7f07028", + "is_verified": false, + "line_number": 3639 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a519d76306e55e5281869eab851873d3e4eef3df", + "is_verified": false, + "line_number": 3644 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1bdb2af33cb184e89270c6c4fa4eab53dd15dbe5", + "is_verified": false, + "line_number": 3650 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b0925124a2e491cc228ae05e9815cbd602807c6b", + "is_verified": false, + "line_number": 3655 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dba7a636ef0b7f00cb8c1cfce617db1201444012", + "is_verified": false, + "line_number": 3658 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "22b1769808826c0186be14a609a58e9dec5d27c2", + "is_verified": false, + "line_number": 3661 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "67d0256877e876801b1f480b9219d9cee80018c8", + "is_verified": false, + "line_number": 3664 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dab00d860d09dd9092b16718edd82bb24a6a21e9", + "is_verified": false, + "line_number": 3668 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"c1a07360a39ba9435ad8e0865784d9be0404f102", + "is_verified": false, + "line_number": 3671 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e13f00057d5ba78e61b5eeebd85f1c32f165a3bc", + "is_verified": false, + "line_number": 3675 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1508bbaf29927b5348d4df62823dab122a0d3b48", + "is_verified": false, + "line_number": 3679 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6c8d2de91b8087bf6f18c9b505ec528e2bafb7c4", + "is_verified": false, + "line_number": 3682 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "50ebc19e203a8f01ff7db97a0230e19f92006bcd", + "is_verified": false, + "line_number": 3686 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "daa2a1a62de835c791bff2fcc5f78ce7c00b3295", + "is_verified": false, + "line_number": 3689 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "794143f823268649c90d053a2116af73069dd7ee", + "is_verified": false, + "line_number": 3693 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "dc84bd66761f714927258320c0bb8dae84587827", + "is_verified": false, + "line_number": 3697 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "46ad1984ce81128fc5040db8164c0cddf8574423", + "is_verified": false, + "line_number": 3701 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "38d576c30351a2efa51a1370871d95af3a753e72", + "is_verified": false, + "line_number": 3706 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7abbd8754af2f0eabe1fc317c5d911bedd01e714", + "is_verified": false, + "line_number": 3711 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "764de27ad572b13ce971b7897fd119148eb73005", + "is_verified": false, + "line_number": 3714 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "93fe91118d3a951aa1f09ce45ba71744bb689082", + "is_verified": false, + "line_number": 3718 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "70ca02dc6a1b468ab2c8bef51d789ccd0921d04a", + "is_verified": false, + "line_number": 3722 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3b7990b0f82bc9bc6c4ec02744f9c02e76aac827", + "is_verified": false, + "line_number": 3726 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9072ebcbad57a1ae4256c5ae37c1e7c7ae5325de", + "is_verified": false, + "line_number": 3730 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a1785e76071823a09f23d2c2b64db80b223c5411", + "is_verified": false, + "line_number": 3734 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7cdaa4e52326d9a81d36d211cd26c2d7a9fc69e1", + "is_verified": false, + "line_number": 3738 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6a66a6ffc1f1c2f31ec1c5c694938bedbae39c02", + "is_verified": false, + "line_number": 3742 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "05b9deaec8f504ac476e1af49d6dd6c7c79f6b36", + "is_verified": false, + "line_number": 3746 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "23423aec1fa9a6422ea8dbb0c73bd0174b2b38cf", + "is_verified": false, + "line_number": 3750 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e0c6ae09dd70fa25056d591ada0ad32df0dfe873", + "is_verified": false, + "line_number": 3753 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4eaeded3211f2c97c2b2629aa5414c68bec24425", + "is_verified": false, + "line_number": 3757 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3479f0e3e92704b5afe32cade0c15e4cf17d2a2d", + "is_verified": false, + "line_number": 3761 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6ccc36eda63bfd2a5227fbfeb34b5ece25a61ce7", + "is_verified": false, + "line_number": 3765 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2017c9b1f1c38285db67283950ac55e5efb09858", + "is_verified": false, + "line_number": 3769 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e35beb1e4de3d98dab18e3299a18cd5634ea0e5e", + "is_verified": false, + "line_number": 3773 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "25b66a038b261e7563e55fbae6006190efbab06c", + "is_verified": false, + "line_number": 3777 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fd8be802abded037933950c2d1cf39f955c824a7", + "is_verified": false, + "line_number": 3781 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "336d02c1e7195c5237197b5884304099ab15c6ba", + "is_verified": false, + "line_number": 3784 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a074015a2ea6680882630eb6afb3cd7c287e581f", + "is_verified": false, + "line_number": 3788 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c44c74adebc4e9832e4ef9dacc60f36520f7e919", + "is_verified": false, + "line_number": 3791 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8db956011899a12e5f3cd3b6451ca77c5896f94d", + "is_verified": false, + "line_number": 3794 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3cec356b4ff63d59160877b7da1bdac2897b3d3d", + "is_verified": false, + "line_number": 3798 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1cb5fddb971e504e9f3c988034dd8643d47b1a43", + "is_verified": false, + "line_number": 3801 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "29c480c45e32cedbac883aeecfde98a25aa06b28", + "is_verified": false, + "line_number": 3804 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ca4f029f71110831a25973c5e1c9343ba6d598e9", + "is_verified": false, + "line_number": 3807 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "76f7a220b13a5c673c9a65d610548916d4fabb64", + "is_verified": false, + 
"line_number": 3810 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b83adff7c2129c512d565fd193de00d34e99c04c", + "is_verified": false, + "line_number": 3813 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b8e38fac2ae28f11ebab0c7ae8dc458221daa23a", + "is_verified": false, + "line_number": 3818 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "88b23240da7e8668601a071da93ec566ce908432", + "is_verified": false, + "line_number": 3822 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4dcc5c09923b9c0c9014f1f416f58aed8360e584", + "is_verified": false, + "line_number": 3826 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ff801baf6f0fd4cf4799aae74ae6f4c5e0f4ff98", + "is_verified": false, + "line_number": 3830 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "178a2a33e0c0d75178a413e0aac2f8cbab930635", + "is_verified": false, + "line_number": 3834 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a4a038181227633802a86d82f26440209eafe758", + "is_verified": false, + "line_number": 3837 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "db3a7c2334e5452c6aa5e8c61b3b601d871efb9e", + "is_verified": false, + "line_number": 3841 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "37153fe39ca138fd330d60f5de222555ea66d3c1", + "is_verified": false, + "line_number": 3844 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bdaf8b094715fef6fc9d26c411bc0f51e1f4d6bd", + "is_verified": false, + "line_number": 3848 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d4f8d980c943fb306089e921db8c8afd41e8a906", + "is_verified": false, + "line_number": 3852 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b1c179f98a45a16ae541474f4a1b00bfd13ea1cc", + "is_verified": false, + "line_number": 3856 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a48d30d51da7ad4bb3ea87ab478e6449f0de91a1", + "is_verified": false, + "line_number": 3860 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "24df42fe64002ee53068a2a431de96b7e4929bb1", + "is_verified": false, + "line_number": 3864 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1abe6ae85781e4ff2fe336d4e3c2db3ca0d6bbc9", + "is_verified": false, + "line_number": 3867 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "27e973c84918fddd412bd5b22d5c53d9acd70152", + "is_verified": false, + "line_number": 3871 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d6748e1949c82ef97ad50db2314269620e3abe54", + "is_verified": false, + "line_number": 3875 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4c8a960439864672cef22edf28a40c8a3749ed68", + "is_verified": false, + "line_number": 3879 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"2764e31835af219e01a144b32b3088b583d0b142", + "is_verified": false, + "line_number": 3883 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8c14ca426e18afd6aedb33f1e4af45d10ff480c9", + "is_verified": false, + "line_number": 3887 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "791f38c64d2c51c10e14db462464c2666734d875", + "is_verified": false, + "line_number": 3891 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bd781c148f26d1f4846c44ad3eb29636a326b436", + "is_verified": false, + "line_number": 3895 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "59eb93688655aaa71ba47d2f0271cd709e14c862", + "is_verified": false, + "line_number": 3899 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3e536139add3f70bf34712d57cf490dc9c631553", + "is_verified": false, + "line_number": 3903 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "69e4418b2ffb487c86b50465bb7a8dd539577c3f", + "is_verified": false, + "line_number": 3907 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "869a40b729083c41268e4ff73da71ad253b63821", + "is_verified": false, + "line_number": 3911 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1f6aa1b96bb584fbe643bd6cfe9e99f6889bf6a5", + "is_verified": false, + "line_number": 3915 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b5ff241f1dfeac7af202ffcfa8df6b14badc21d5", + "is_verified": false, + "line_number": 3919 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a2f78b039096fb5779092940db6f92abdd02b248", + "is_verified": false, + "line_number": 3923 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "eeacdd78906304066ade634711edfa3120b26515", + "is_verified": false, + "line_number": 3926 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bfbe2718fb7831987dda00ef00e5026befcadf0c", + "is_verified": false, + "line_number": 3929 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4912d571169f95e8793115e75a6d9eeff542e56c", + "is_verified": false, + "line_number": 3933 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a7f8afa1a4e2b25e289367985ad5ec1bd05ee9b6", + "is_verified": false, + "line_number": 3939 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "ac89a1c44566548af25a362310fd0f5520667be0", + "is_verified": false, + "line_number": 3944 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6a41538e3f5cf147eb25ba78550fb21dc000f93b", + "is_verified": false, + "line_number": 3948 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "935d3e23fbaf7a81e9f2e58bfd49d8e3860c0366", + "is_verified": false, + "line_number": 3951 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6cc5afba59c5bd26a4795e037f5b9d1ef6cb2ce7", + "is_verified": false, + "line_number": 3954 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f8e96988ec776bf5b53759a89a56a5a49348ed55", + "is_verified": false, + "line_number": 3957 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "324c69013b2b1138531752b1a3b60d78b08c5d8d", + "is_verified": false, + "line_number": 3960 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1a6ef22a2b40ecd3ced82dbdad3274d2813b1f9c", + "is_verified": false, + "line_number": 3964 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "66492b55c53e0fdf2f1b9c6eb0f0f065ed24961a", + "is_verified": false, + "line_number": 3968 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1528a569a2fd746c9f56d0c681950464f681bf88", + "is_verified": false, + "line_number": 3972 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1898fd2a531bd4a2dcbf84f24d6bda4d13160881", + "is_verified": false, + "line_number": 3975 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "36a41bfa336a40f390233bcb3edba75b423c0ac5", + "is_verified": false, + "line_number": 3979 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c2cfd88522dc79c5d1764af80e11fb966935bee0", + "is_verified": false, + "line_number": 3983 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "72e7ed15b07c6f0fb80160dbe9fd03688cbb7ea9", + "is_verified": false, + "line_number": 3987 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6cc024e7e2a596b3d50ab90e14d24b88d74b0779", + "is_verified": false, + "line_number": 3991 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "47a2a2576064434ef2f063e1f0a5eec8f5cac3a4", + "is_verified": false, + "line_number": 3995 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e524ce9b00c97b38b4e9aace540ecf2b9fd6d27d", + "is_verified": false, + "line_number": 3999 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6964ce0f7c8c41461cc2f11272cfd5b0cf9ae892", + "is_verified": false, + "line_number": 4003 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "525fd4b11ce0f9a03ce67f1645f060cd21fe2a01", + "is_verified": false, + "line_number": 4007 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d2cfb5398357e3e4db8b177b22335326b92006d5", + "is_verified": false, + "line_number": 4013 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9bf200e3147a1d837839d6bf5b649716c1537cbb", + "is_verified": false, + "line_number": 4018 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9f91fea88d1567fbf820faf0c79923ce8eb821fa", + "is_verified": false, + "line_number": 4021 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2684ff853d2e3bd3dd714033d3ea2fcb39ee8459", + "is_verified": false, + "line_number": 4024 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a09ac4be578a7b177bc5f823f8624e32f10751fb", + "is_verified": false, + 
"line_number": 4027 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cf8769a85052b2fe5947cc828d2c35305e3cc2d4", + "is_verified": false, + "line_number": 4031 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6d385be18492ae97a5e0f454745bdb513796f720", + "is_verified": false, + "line_number": 4035 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e48841ceb23f37c6285b544a6061701cffd33739", + "is_verified": false, + "line_number": 4039 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "15722b2d85dd78abf0730839c0ebdfb5873a2da4", + "is_verified": false, + "line_number": 4043 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3d987c7a2a438b1d3c5cff9c6c39fd4bf3c9ffbd", + "is_verified": false, + "line_number": 4047 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "96b299789c89faf2ca7467ca7e034c969bed15f6", + "is_verified": false, + "line_number": 4051 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "61daef219a32046633e73ba8c3fefa7d4ab6b969", + "is_verified": false, + "line_number": 4055 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b933e7804f474734a5b09f73902497c3d1336b00", + "is_verified": false, + "line_number": 4059 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3a2e20f18e6484a2484bcc9c9169ab2370cf5b1b", + "is_verified": false, + "line_number": 4063 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1363ce90fdfa496df9f847502eaf2699b6776d2f", + "is_verified": false, + "line_number": 4067 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5717d97b729ea9903db3476d057ab8af569f6f34", + "is_verified": false, + "line_number": 4071 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d9777d0c95f98a54ace74a709c1a3cd17016fc5f", + "is_verified": false, + "line_number": 4075 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "360598017ee18da4abe2248e1c230a4505ddc5c6", + "is_verified": false, + "line_number": 4078 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c936ceac94d4e43d2f734b4b8b8231cb368458e0", + "is_verified": false, + "line_number": 4085 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6e052fe5b46f0af3010617109aaecb06e8a1f300", + "is_verified": false, + "line_number": 4090 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9fc140f75708ab675a4ab90516d10dd26ed0e724", + "is_verified": false, + "line_number": 4095 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1256ac04591c96bf4843a3cf47bc483dd172317d", + "is_verified": false, + "line_number": 4099 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "99dd278a233148eaf159f0062fa26f63fa3205ab", + "is_verified": false, + "line_number": 4102 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": 
"798f9aebdd4bf85e0bdb03faad72aee2708a58c3", + "is_verified": false, + "line_number": 4106 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "292b56ccb1313ea50f4d1bf0efc7f2b04670fe73", + "is_verified": false, + "line_number": 4110 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a0a9f050339fb7dfc50f201b52645f99c719dc6e", + "is_verified": false, + "line_number": 4114 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d52512731e222109568090fb46fc7cd43890a312", + "is_verified": false, + "line_number": 4117 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "9eaf006db1f782b8da68c3ae97eae05f36c62973", + "is_verified": false, + "line_number": 4121 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "6a48514ae5f5a41d7d00d480a07fe8bba306360f", + "is_verified": false, + "line_number": 4127 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "390113d1af6b5ece9a90ea6db402a0008b2a9d08", + "is_verified": false, + "line_number": 4131 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c302d3df9130c526ac3ede2363a31bfc4c162abb", + "is_verified": false, + "line_number": 4134 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4885a73ae27294938a483e855613b49825a795e8", + "is_verified": false, + "line_number": 4138 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "44c7ecfd47e1b85be18de43744314e6ab196b697", + "is_verified": false, + "line_number": 4142 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "8af5d8dc6608e85d5fa11a6e06ad79765fff659b", + "is_verified": false, + "line_number": 4146 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4311b1992ce595dc1c806a790ce9b90f5ea08e48", + "is_verified": false, + "line_number": 4150 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "02e81c37054304b4992b7225e4aefa96905d0708", + "is_verified": false, + "line_number": 4153 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5c81cb3bbf077a30ebeea809bdba83483c8d5dd8", + "is_verified": false, + "line_number": 4157 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4a1c248f3b2ff8b4ac7ee3bd67eafb7b69668f93", + "is_verified": false, + "line_number": 4161 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e61969a88951f6908cebfd7860868ff6aa03ac5d", + "is_verified": false, + "line_number": 4164 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "63c5647d569cb91eae2e899230886e5d0f6142b8", + "is_verified": false, + "line_number": 4168 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "82b1fabe958ce0e451f6709cb91914eb7a2c5acc", + "is_verified": false, + "line_number": 4172 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5b2f7f4e37d6b30d07f003ccad464736e7eca574", + "is_verified": false, + "line_number": 4175 + }, + { + "type": "Base64 High Entropy 
String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "a83323d73f45e76ce84e7f76ae13d71299152ba9", + "is_verified": false, + "line_number": 4179 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "cb9117ab59839ffa218ac1e0b2fe8237a12f9922", + "is_verified": false, + "line_number": 4183 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "46112fdba033b159ef808b435ad45e27e37d1f0d", + "is_verified": false, + "line_number": 4187 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1af24749abdc895bb01d76353d8bcdcc1a3ce370", + "is_verified": false, + "line_number": 4191 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "7829049dba2b2e480bbff402cdf1dbefe656e36f", + "is_verified": false, + "line_number": 4195 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fb0efd2d61c113c008a898abd9b1c9630705b752", + "is_verified": false, + "line_number": 4199 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f0257d02d23f53e799476af74cf0386375d4b6c2", + "is_verified": false, + "line_number": 4203 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bfb0fdba3d55b35175d739a225f13fbbb54e0d25", + "is_verified": false, + "line_number": 4208 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "749184805a59b4e245165000ff76d7e916f3efa5", + "is_verified": false, + "line_number": 4212 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "e8052d6a30f5c788cee736b595d620d412596dc8", + "is_verified": false, + "line_number": 4216 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "47f61db9968d9147373f9919fb2d39f044af96d4", + "is_verified": false, + "line_number": 4220 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c1907fc992b0e961b0c622ff8cd91edc36456c2a", + "is_verified": false, + "line_number": 4224 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "0c1c26042e18355cace0924455a693bd5959dec8", + "is_verified": false, + "line_number": 4228 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c1a38efe10a08cb343b44ec85e4f991fa64e0dc8", + "is_verified": false, + "line_number": 4232 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5d1a39a23143cbbaf35d7aefe996873a777b4131", + "is_verified": false, + "line_number": 4235 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "b5acc3ebb6bebc72bf34bf1b65ca79c72c75f3c1", + "is_verified": false, + "line_number": 4238 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3e30748c91e19b53d8cde9d38a70c993d494dfae", + "is_verified": false, + "line_number": 4242 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "3744d756235c33e86a82259a8d3b8dd6569789fc", + "is_verified": false, + "line_number": 4254 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "60f62b99a14507fe2d275d65d1bc793565396724", + "is_verified": false, + 
"line_number": 4258 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d534a8643166b53f9b72f567856071d026b7d454", + "is_verified": false, + "line_number": 4262 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "fd979750316587a434d421119fe616d5325ac2fb", + "is_verified": false, + "line_number": 4265 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c0665d268ee6985b4ce98c67b9c6c7502af11a25", + "is_verified": false, + "line_number": 4269 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "03edbe2e35e4244d03ec1f59ae4b9e86cb180365", + "is_verified": false, + "line_number": 4272 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "4ff18905b83280512ef37332280dc2c71540e189", + "is_verified": false, + "line_number": 4275 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "c186069e94c96664398d9ccf7eb5fc6d7c232f75", + "is_verified": false, + "line_number": 4279 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "870edf2e3d49cca58020ed671229678252aab029", + "is_verified": false, + "line_number": 4284 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "f517feec4703cacc2629b34a71b6c4f71dcaaacb", + "is_verified": false, + "line_number": 4288 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "5888d1b730d17d304b51d9ac433812c153d85b55", + "is_verified": false, + "line_number": 4292 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "1b7fa9d44dae627da859f13db227e3628d4b177d", + "is_verified": false, + "line_number": 4296 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "bc001a988dc93a1d9f0b811c8b6f5ed71cd922e8", + "is_verified": false, + "line_number": 4299 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "d20d1d54c7478c3d210e559a91128c9ee7e31edb", + "is_verified": false, + "line_number": 4303 + }, + { + "type": "Base64 High Entropy String", + "filename": "pnpm-lock.yaml", + "hashed_secret": "2a6ec2710324e581e13c5bd7ce5f78fd563b3db0", + "is_verified": false, + "line_number": 4307 + } + ] + }, + "generated_at": "2025-11-03T09:05:11Z" +} diff --git a/ENVIRONMENT_SETUP.md b/ENVIRONMENT_SETUP.md index 1a25fc8..3c1b169 100644 --- a/ENVIRONMENT_SETUP.md +++ b/ENVIRONMENT_SETUP.md @@ -127,4 +127,3 @@ docker build -f dockerfiles/iflow-agent.Dockerfile -t iflow-agent:latest . 3. 
Check the container logs at `storage/{task_id}/agents/iflow/container_stdout.log` - You should see "100% context left" instead of "Disconnected" - The agent should successfully initialize and accept prompts - diff --git a/IMPLEMENTATION_STATUS.md b/IMPLEMENTATION_STATUS.md index c0e697a..ec5e44a 100644 --- a/IMPLEMENTATION_STATUS.md +++ b/IMPLEMENTATION_STATUS.md @@ -109,7 +109,7 @@ Example refactor: ```python # OLD (Docker-based) subprocess.run([ - "docker", "run", + "docker", "run", "-v", f"{repo_dir}:/agent/workspace", "iflow-agent:latest" ]) @@ -208,4 +208,3 @@ FastAPI → Worker → Agent SDK → Direct API Calls - Update deployment instructions - Add SDK troubleshooting guide - Document comparison API endpoint - diff --git a/LOGGING_GUIDE.md b/LOGGING_GUIDE.md index 99cbad1..0a78737 100644 --- a/LOGGING_GUIDE.md +++ b/LOGGING_GUIDE.md @@ -187,4 +187,3 @@ http://127.0.0.1:8000/logs?taskId=YOUR_TASK_ID ``` All the detailed logs from the API log file will now appear in the UI in real-time! - diff --git a/README.md b/README.md index 6cc43aa..df636f5 100644 --- a/README.md +++ b/README.md @@ -32,8 +32,9 @@ A comprehensive evaluation system for testing AI agent performance under memory ### Prerequisites - Python 3.11+ (avoid 3.13 due to asyncpg compatibility) -- PostgreSQL database -- Redis server +- Docker and Docker Compose (for PostgreSQL and Redis) +- PostgreSQL database (or use Docker) +- Redis server (or use Docker) - Agent CLIs: iFlow, Claude, Gemini ### Installation @@ -41,42 +42,91 @@ A comprehensive evaluation system for testing AI agent performance under memory ```bash # 1. Clone and setup environment git clone -cd cli-eval-poc/tools +cd memory-evals python -m venv venv source venv/bin/activate # On Windows: venv\Scripts\activate # 2. Install dependencies +pip install --upgrade pip pip install -e . # 3. Configure environment -cp .env.example .env -# Edit .env with your API keys and database settings - -# 4. Setup database +# Create .env file with your API keys and database settings +# See Configuration section below for required variables + +# 4. Start PostgreSQL and Redis with Docker +# Start PostgreSQL container +docker run -d --name cli_eval_postgres \ + -e POSTGRES_USER=erashu212 \ + -e POSTGRES_PASSWORD='Enter123_' \ + -e POSTGRES_DB=cli_eval_db \ + -p 5432:5432 \ + postgres:latest + +# Start Redis container +docker run -d --name redis \ + -p 6379:6379 \ + redis:latest + +# Wait a few seconds for databases to initialize +sleep 5 + +# 5. Setup database migrations alembic upgrade head -# 5. Start services -# Terminal 1: Redis -redis-server +# 6. Start services (Option A: Using startup script - Recommended) +./scripts/run.sh -# Terminal 2: API Server +# OR start services manually (Option B) +# Terminal 1: API Server uvicorn app.main:app --host 127.0.0.1 --port 8000 --reload -# Terminal 3: Worker +# Terminal 2: Worker python worker.py -# 6. Open dashboard +# 7. 
Open dashboard open http://localhost:8000 ``` +### Quick Start with Docker (Alternative) + +If you prefer to use Docker for everything, you can also start PostgreSQL and Redis containers as needed: + +```bash +# Stop containers (if already running) +docker stop cli_eval_postgres redis 2>/dev/null || true +docker rm cli_eval_postgres redis 2>/dev/null || true + +# Start PostgreSQL +docker run -d --name cli_eval_postgres \ + -e POSTGRES_USER=your_username \ + -e POSTGRES_PASSWORD=your_password \ + -e POSTGRES_DB=cli_eval_db \ + -p 5432:5432 \ + postgres:latest + +# Start Redis +docker run -d --name redis \ + -p 6379:6379 \ + redis:latest + +# Verify containers are running +docker ps +``` + ## Configuration ### Required Environment Variables #### Database & Redis ```bash -DATABASE_URL=postgresql://user:password@localhost:5432/memory_break_db +# Note: DATABASE_URL should use psycopg2 driver for SQLAlchemy +DATABASE_URL=postgresql+psycopg2://user:password@localhost:5432/cli_eval_db REDIS_URL=redis://localhost:6379/0 + +# Example with Docker containers: +# DATABASE_URL=postgresql+psycopg2://erashu212:Enter123_@localhost:5432/cli_eval_db +# REDIS_URL=redis://localhost:6379/0 ``` #### API Keys @@ -223,19 +273,47 @@ Each dimension is scored 0.0-1.0, with an overall average determining pass/fail **Database Connection Errors** ```bash -# Check PostgreSQL is running -pg_ctl status +# Check PostgreSQL Docker container is running +docker ps --filter "name=cli_eval_postgres" -# Verify connection string -psql "postgresql://user:password@localhost:5432/memory_break_db" +# Check container logs if there are issues +docker logs cli_eval_postgres + +# Verify connection (if using Docker) +docker exec -it cli_eval_postgres psql -U erashu212 -d cli_eval_db + +# Or verify with psql client +psql "postgresql://user:password@localhost:5432/cli_eval_db" + +# If container is not running, start it: +docker start cli_eval_postgres + +# If container doesn't exist, create it: +docker run -d --name cli_eval_postgres \ + -e POSTGRES_USER=erashu212 \ + -e POSTGRES_PASSWORD='Enter123_' \ + -e POSTGRES_DB=cli_eval_db \ + -p 5432:5432 \ + postgres:latest ``` **Redis Connection Errors** ```bash -# Check Redis is running -redis-cli ping +# Check Redis Docker container is running +docker ps --filter "name=redis" + +# Check container logs if there are issues +docker logs redis +# Verify Redis is responding +redis-cli ping # Should return: PONG + +# If container is not running, start it: +docker start redis + +# If container doesn't exist, create it: +docker run -d --name redis -p 6379:6379 redis:latest ``` **Agent CLI Issues** @@ -253,10 +331,44 @@ gemini --version ### Logs -- **API Server**: Check console output or logs/ -- **Worker**: Check worker.log +- **API Server**: Check console output or `logs/api.log` +- **Worker**: Check `logs/worker.log` or console output - **Task Logs**: Available via web dashboard or API -- **Agent Transcripts**: Stored in storage/{task_id}/ +- **Agent Transcripts**: Stored in `storage/{task_id}/` +- **Docker Containers**: + ```bash + docker logs cli_eval_postgres # PostgreSQL logs + docker logs redis # Redis logs + ``` + +### Service Management + +**Using the startup script:** +```bash +# Start all services +./scripts/run.sh + +# Stop all services +./scripts/stop.sh +``` + +**Manual service management:** +```bash +# Start PostgreSQL +docker start cli_eval_postgres + +# Start Redis +docker start redis + +# Stop PostgreSQL +docker stop cli_eval_postgres + +# Stop Redis +docker stop redis + +# Remove 
 containers (⚠️ This will delete data) +docker rm -f cli_eval_postgres redis +``` ## Contributing @@ -268,4 +380,4 @@ gemini --version ## License -[License information here] \ No newline at end of file +[License information here] diff --git a/alembic.ini b/alembic.ini index 46e7f3b..bcee6ab 100644 --- a/alembic.ini +++ b/alembic.ini @@ -83,7 +83,7 @@ path_separator = os # database URL will be read from the application configuration (.env file) # See alembic/env.py for the actual configuration -# sqlalchemy.url = +# sqlalchemy.url = [post_write_hooks] diff --git a/alembic/README b/alembic/README index 98e4f9c..2500aa1 100644 --- a/alembic/README +++ b/alembic/README @@ -1 +1 @@ -Generic single-database configuration. \ No newline at end of file +Generic single-database configuration. diff --git a/alembic/env.py b/alembic/env.py index ffe8397..cc0dc17 100644 --- a/alembic/env.py +++ b/alembic/env.py @@ -1,9 +1,8 @@ import sys -from pathlib import Path from logging.config import fileConfig +from pathlib import Path -from sqlalchemy import engine_from_config, create_engine -from sqlalchemy import pool +from sqlalchemy import engine_from_config, pool from alembic import context @@ -12,11 +11,12 @@ sys.path.insert(0, str(project_root)) # Import application configuration and models -from app.config import settings from sqlmodel import SQLModel +from app.config import settings + # Import all database models so they are registered with SQLModel.metadata -from app.infrastructure.database import TaskDB, AgentRunDB, ScoreDB, ArtifactDB +from app.infrastructure.database import AgentRunDB, ArtifactDB, ScoreDB, TaskDB # noqa: F401 # this is the Alembic Config object, which provides # access to the values within the .ini file in use. @@ -78,9 +77,7 @@ def run_migrations_online() -> None: ) with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata - ) + context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() diff --git a/app/agents/base.py b/app/agents/base.py index 9574bf5..42ba248 100644 --- a/app/agents/base.py +++ b/app/agents/base.py @@ -1,64 +1,63 @@ """Base agent interface and common functionality.""" -import shutil import logging +import shutil from abc import ABC, abstractmethod from pathlib import Path -from typing import Dict, List, Optional, Protocol, Any +from typing import Any, Protocol from uuid import UUID -from app.domain.entities import AgentName, AgentRun +from app.domain.entities import AgentName logger = logging.getLogger(__name__) class AgentSession(Protocol): """Session information for agent execution.""" - + task_id: UUID agent_run_id: UUID repo_dir: Path output_dir: Path - prompts: Dict[str, str] + prompts: dict[str, str] timeout: int class CompressionDetector(ABC): """Abstract base class for compression detection strategies.""" - + @abstractmethod def detect_compression(self, session_data: str) -> bool: """Detect if compression has occurred based on session data.""" - pass - + @abstractmethod - def should_enter_memory_only(self, session_data: str, previous_state: Dict[str, Any]) -> bool: + def should_enter_memory_only( + self, session_data: str, previous_state: dict[str, Any] + ) -> bool: """Determine if agent should enter memory-only mode.""" - pass class AgentAdapter(ABC): """Abstract base class for AI agent adapters.""" - - def __init__(self, name: AgentName, binary_path: Optional[str] = None): + + def __init__(self, name: AgentName, binary_path: str | None = None): self.name = name self.binary_path = binary_path or name.value - 
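A note on the mechanical churn in this and the following hunks: most of it is Ruff's pyupgrade-style rewriting of `typing.Dict`/`List`/`Optional` annotations into PEP 585 built-in generics and PEP 604 unions. A minimal before/after sketch (the `load_prompts` function is illustrative, not from this repo):

```python
from typing import Dict, Optional  # only needed for the "before" form


# Before: typing-module generics, as the code read prior to this patch.
def load_prompts_old(path: Optional[str] = None) -> Dict[str, str]:
    return {}


# After: built-in generics (PEP 585) and union syntax (PEP 604), the style
# Ruff rewrites the codebase to throughout this patch (requires Python 3.10+).
def load_prompts(path: str | None = None) -> dict[str, str]:
    return {}
```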
self.compression_detector: Optional[CompressionDetector] = None + self.compression_detector: CompressionDetector | None = None self.logger = logging.getLogger(f"agents.{name.value}") - + @abstractmethod def validate_installation(self) -> bool: """Validate that the agent CLI is properly installed.""" - pass - + @abstractmethod def run_session( self, session: AgentSession, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Run a complete agent session. - + Returns: Dict containing: - artifacts: Dict[str, str] (name -> file_path) @@ -66,49 +65,47 @@ def run_session( - compression_detected: bool - milestones: List[str] """ - pass - + @abstractmethod - def get_version_info(self) -> Dict[str, str]: + def get_version_info(self) -> dict[str, str]: """Get agent version and system information.""" - pass - + def check_binary_exists(self) -> bool: """Check if the agent binary exists in PATH.""" return shutil.which(self.binary_path) is not None - + def setup_output_directory(self, output_dir: Path) -> None: """Setup output directory for agent artifacts.""" output_dir.mkdir(parents=True, exist_ok=True) self.logger.info(f"Created output directory: {output_dir}") - + def cleanup_session(self, session: AgentSession) -> None: """Cleanup after session completion.""" # Default implementation - can be overridden self.logger.info(f"Session cleanup completed for {self.name.value}") - - def handle_error(self, error: Exception, session: AgentSession) -> Dict[str, Any]: + + def handle_error(self, error: Exception, session: AgentSession) -> dict[str, Any]: """Handle errors during session execution.""" self.logger.error(f"Error in {self.name.value} session: {error}") - + error_artifact_path = session.output_dir / "error.txt" with open(error_artifact_path, "w") as f: - f.write(f"Error: {str(error)}\n") + f.write(f"Error: {error!s}\n") f.write(f"Agent: {self.name.value}\n") f.write(f"Task ID: {session.task_id}\n") - + return { "artifacts": {"error": str(error_artifact_path)}, "stats": {"error": str(error)}, "compression_detected": False, "milestones": ["error"], } - - async def execute_evaluation(self, eval_params: Dict[str, Any]) -> Dict[str, Any]: + + async def execute_evaluation(self, eval_params: dict[str, Any]) -> dict[str, Any]: """ Wrapper method for container-based execution. Converts simple eval_params to AgentSession and calls run_session(). 
- + Args: eval_params: Dictionary containing: - pr_url: str @@ -117,59 +114,61 @@ async def execute_evaluation(self, eval_params: Dict[str, Any]) -> Dict[str, Any - max_files: int (optional) - rubric: List[str] (optional) - timeout_seconds: int (optional) - + Returns: Dict containing execution results """ from uuid import uuid4 - + # Map prompt keys from service format to agent format - prompts = eval_params.get('prompts', {}) + prompts = eval_params.get("prompts", {}) mapped_prompts = { - 'pre': prompts.get('precompression', ''), - 'deep': prompts.get('deepdive', ''), - 'memory_only': prompts.get('memory_only', ''), - 'eval': prompts.get('evaluator_set', '') + "pre": prompts.get("precompression", ""), + "deep": prompts.get("deepdive", ""), + "memory_only": prompts.get("memory_only", ""), + "eval": prompts.get("evaluator_set", ""), } - + # Create a simple AgentSession-like object class SimpleSession: def __init__(self, params, prompts_mapped): self.task_id = uuid4() self.agent_run_id = uuid4() - self.repo_dir = Path(params['workspace_dir']) - self.output_dir = self.repo_dir.parent / 'output' + self.repo_dir = Path(params["workspace_dir"]) + self.output_dir = self.repo_dir.parent / "output" self.prompts = prompts_mapped - self.timeout = params.get('timeout_seconds', 1800) - + self.timeout = params.get("timeout_seconds", 1800) + session = SimpleSession(eval_params, mapped_prompts) - + # Call the existing run_session method result = self.run_session(session) - + return result class StandardCompressionDetector(CompressionDetector): """Standard compression detection implementation.""" - + def __init__(self, threshold_low: int = 30, jump_threshold: int = 30): self.threshold_low = threshold_low self.jump_threshold = jump_threshold - + def detect_compression(self, session_data: str) -> bool: """Detect compression based on context percentage thresholds.""" # This is a base implementation - specific agents will override return False - - def should_enter_memory_only(self, session_data: str, previous_state: Dict[str, Any]) -> bool: + + def should_enter_memory_only( + self, session_data: str, previous_state: dict[str, Any] + ) -> bool: """Determine if should enter memory-only mode.""" return self.detect_compression(session_data) class AgentCapabilities: """Agent capabilities and feature flags.""" - + def __init__( self, supports_export: bool = False, @@ -187,7 +186,7 @@ def __init__( class AgentMetadata: """Agent metadata for registry and discovery.""" - + def __init__( self, name: AgentName, @@ -209,7 +208,7 @@ def __init__( class BaseAgentException(Exception): """Base exception for agent-related errors.""" - + def __init__(self, agent_name: str, message: str): self.agent_name = agent_name self.message = message @@ -218,19 +217,15 @@ def __init__(self, agent_name: str, message: str): class AgentNotFoundError(BaseAgentException): """Raised when agent binary is not found.""" - pass class AgentExecutionError(BaseAgentException): """Raised when agent execution fails.""" - pass class AgentTimeoutError(BaseAgentException): """Raised when agent execution times out.""" - pass class AgentValidationError(BaseAgentException): """Raised when agent validation fails.""" - pass diff --git a/app/agents/claude_agent.py b/app/agents/claude_agent.py index b3bd8ee..5045986 100644 --- a/app/agents/claude_agent.py +++ b/app/agents/claude_agent.py @@ -1,18 +1,21 @@ """Claude AI agent adapter using Anthropic SDK.""" -import logging import asyncio +import logging from pathlib import Path -from typing import Dict, Any, 
Optional, List +from typing import Any from anthropic import Anthropic -from app.domain.entities import AgentName from app.agents.base import ( - AgentAdapter, AgentSession, AgentCapabilities, - AgentMetadata, AgentExecutionError + AgentAdapter, + AgentCapabilities, + AgentExecutionError, + AgentMetadata, + AgentSession, ) from app.config import settings +from app.domain.entities import AgentName from app.services.task_logger import AgentSessionLogger logger = logging.getLogger(__name__) @@ -20,7 +23,7 @@ class ClaudeAgent(AgentAdapter): """Claude AI agent adapter using Anthropic SDK for direct API interaction.""" - + def __init__(self): super().__init__(AgentName.CLAUDE, "claude") # No binary needed # Don't instantiate the client here - create it in the async context @@ -28,17 +31,17 @@ def __init__(self): self.max_tokens = settings.max_context_tokens self.max_turns = settings.max_turns self.session_timeout = settings.agent_session_timeout - + def validate_installation(self) -> bool: """Validate that Anthropic API key is configured.""" if not settings.anthropic_api_key: self.logger.error("Anthropic API key not configured") return False - + self.logger.info("Claude (Anthropic SDK) validation successful") return True - - def get_version_info(self) -> Dict[str, str]: + + def get_version_info(self) -> dict[str, str]: """Get Claude version and system information.""" return { "model": self.model, @@ -46,312 +49,414 @@ def get_version_info(self) -> Dict[str, str]: "api_configured": str(bool(settings.anthropic_api_key)), "sdk": "anthropic-python", } - + def _load_repo_files(self, repo_dir: Path, max_files: int = 50) -> str: """Load repository files into a context string with token limit.""" self.logger.info(f"Loading repository files from {repo_dir}") - + # Limit initial context to ~50K tokens (leaving room for conversation) MAX_CONTEXT_TOKENS = 50000 - + # Common code file extensions code_extensions = { - '.py', '.js', '.ts', '.tsx', '.jsx', '.java', '.cpp', '.c', '.h', - '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.cs', '.scala', - '.md', '.json', '.yaml', '.yml', '.toml', '.xml', '.html', '.css' + ".py", + ".js", + ".ts", + ".tsx", + ".jsx", + ".java", + ".cpp", + ".c", + ".h", + ".go", + ".rs", + ".rb", + ".php", + ".swift", + ".kt", + ".cs", + ".scala", + ".md", + ".json", + ".yaml", + ".yml", + ".toml", + ".xml", + ".html", + ".css", } - + files_content = [] file_count = 0 total_tokens = 0 - + try: - for file_path in repo_dir.rglob('*'): + for file_path in repo_dir.rglob("*"): if file_count >= max_files: break - + # Skip directories and hidden files - if file_path.is_dir() or file_path.name.startswith('.'): + if file_path.is_dir() or file_path.name.startswith("."): continue - + # Skip common non-code directories - if any(part in file_path.parts for part in ['.git', 'node_modules', '__pycache__', 'venv', 'dist', 'build']): + if any( + part in file_path.parts + for part in [ + ".git", + "node_modules", + "__pycache__", + "venv", + "dist", + "build", + ] + ): continue - + # Only include code files if file_path.suffix not in code_extensions: continue - + try: relative_path = file_path.relative_to(repo_dir) - content = file_path.read_text(encoding='utf-8', errors='ignore') - + content = file_path.read_text(encoding="utf-8", errors="ignore") + # Estimate tokens (rough: 1 token ~= 4 characters) file_tokens = len(content) // 4 - + # Stop if we would exceed token limit if total_tokens + file_tokens > MAX_CONTEXT_TOKENS: - self.logger.info(f"Stopping at {file_count} files to stay within 
{MAX_CONTEXT_TOKENS} token limit") + self.logger.info( + f"Stopping at {file_count} files to stay within {MAX_CONTEXT_TOKENS} token limit" + ) break - - files_content.append(f"### File: {relative_path}\n```{file_path.suffix[1:]}\n{content}\n```\n") + + files_content.append( + f"### File: {relative_path}\n```{file_path.suffix[1:]}\n{content}\n```\n" + ) file_count += 1 total_tokens += file_tokens - + except Exception as e: self.logger.warning(f"Could not read {file_path}: {e}") continue - + except Exception as e: self.logger.error(f"Error loading repository files: {e}") return f"Error loading repository: {e}" - - self.logger.info(f"Loaded {file_count} files (~{total_tokens} tokens) from repository") - + + self.logger.info( + f"Loaded {file_count} files (~{total_tokens} tokens) from repository" + ) + if not files_content: return "No code files found in repository." - - return f"# Repository Code\n\n" + "\n\n".join(files_content) - - def run_session(self, session: AgentSession) -> Dict[str, Any]: + + return "# Repository Code\n\n" + "\n\n".join(files_content) + + def run_session(self, session: AgentSession) -> dict[str, Any]: """Run complete Claude session using Anthropic SDK.""" self.setup_output_directory(session.output_dir) - + # Create transcript file transcript_path = session.output_dir / "transcript.txt" - + # Create session logger for UI streaming session_logger = AgentSessionLogger(session.task_id, "claude") - + try: # Run async session - result = asyncio.run(self._run_async_session(session, transcript_path, session_logger)) - + result = asyncio.run( + self._run_async_session(session, transcript_path, session_logger) + ) + # Add file paths to result result["artifacts"]["transcript"] = str(transcript_path) - + # Close session logger session_logger.close_session("completed", result.get("artifacts", {})) - + return result - + except Exception as e: self.logger.error(f"Claude session failed: {e}", exc_info=True) session_logger.close_session("failed", {"error": str(e)}) return self.handle_error(e, session) - + async def _run_async_session( self, session: AgentSession, transcript_path: Path, - session_logger: AgentSessionLogger - ) -> Dict[str, Any]: + session_logger: AgentSessionLogger, + ) -> dict[str, Any]: """Run the async Claude session using Anthropic SDK.""" - + milestones = [] stats = {} responses = [] total_tokens = 0 hit_limit = False - + # Create synchronous client client = Anthropic(api_key=settings.anthropic_api_key) - + try: with open(transcript_path, "w", encoding="utf-8") as log_file: # Phase 1: Load repository context self.logger.info("=" * 80) self.logger.info("PHASE 1: Loading Repository Context") self.logger.info("=" * 80) - + repo_context = self._load_repo_files(session.repo_dir) - log_file.write(f"Repository Context Loaded: {len(repo_context)} characters\n") + log_file.write( + f"Repository Context Loaded: {len(repo_context)} characters\n" + ) log_file.write("=" * 80 + "\n\n") - + init_prompt = f"{repo_context}\n\nThis is a code repository. Please analyze it and be ready to answer questions about it." 
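`_load_repo_files` above budgets the initial context with a deliberately rough heuristic: about 4 characters per token, capped at 50K tokens to leave headroom for the conversation. A self-contained sketch of that budgeting loop (file names and contents are placeholders):

```python
# Rough token budgeting mirroring _load_repo_files(): ~4 chars per token,
# stopping before the running total would exceed the 50K-token cap.
MAX_CONTEXT_TOKENS = 50_000


def estimate_tokens(text: str) -> int:
    return len(text) // 4


files = {"app/main.py": "x = 1\n" * 500}  # placeholder repository contents
total = 0
kept: list[str] = []
for name, content in files.items():
    file_tokens = estimate_tokens(content)
    if total + file_tokens > MAX_CONTEXT_TOKENS:
        break
    kept.append(name)
    total += file_tokens
print(f"kept {len(kept)} files (~{total} tokens)")
```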
- messages = [ - {"role": "user", "content": init_prompt} - ] - + messages = [{"role": "user", "content": init_prompt}] + # Log to UI session_logger.log_prompt_sent(init_prompt, "repo_initialization") - + # Initial context loading response = client.messages.create( - model=self.model, - max_tokens=4096, - messages=messages + model=self.model, max_tokens=4096, messages=messages ) - - total_tokens += response.usage.input_tokens + response.usage.output_tokens - messages.append({"role": "assistant", "content": response.content[0].text}) - + + total_tokens += ( + response.usage.input_tokens + response.usage.output_tokens + ) + messages.append( + {"role": "assistant", "content": response.content[0].text} + ) + log_file.write(f"ASSISTANT: {response.content[0].text}\n\n") log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") log_file.write("=" * 80 + "\n\n") - + # Log to UI - session_logger.log_agent_response(response.content[0].text, "repo_analysis") - session_logger.log_context_stats(f"{total_tokens}/{self.max_tokens}", f"Tokens: {total_tokens:,}") - + session_logger.log_agent_response( + response.content[0].text, "repo_analysis" + ) + session_logger.log_context_stats( + f"{total_tokens}/{self.max_tokens}", f"Tokens: {total_tokens:,}" + ) + milestones.append("repo_loaded") stats["initial_tokens"] = total_tokens - + # Phase 2: Pre-compression prompt self.logger.info("=" * 80) self.logger.info("PHASE 2: Pre-Compression Analysis") self.logger.info("=" * 80) - - session_logger.log_prompt_sent(session.prompts["precompression"], "pre_compression") - messages.append({"role": "user", "content": session.prompts["precompression"]}) - + + session_logger.log_prompt_sent( + session.prompts["precompression"], "pre_compression" + ) + messages.append( + {"role": "user", "content": session.prompts["precompression"]} + ) + response = client.messages.create( - model=self.model, - max_tokens=4096, - messages=messages + model=self.model, max_tokens=4096, messages=messages + ) + + total_tokens += ( + response.usage.input_tokens + response.usage.output_tokens + ) + messages.append( + {"role": "assistant", "content": response.content[0].text} ) - - total_tokens += response.usage.input_tokens + response.usage.output_tokens - messages.append({"role": "assistant", "content": response.content[0].text}) - responses.append({"phase": "pre_compression", "response": response.content[0].text}) - + responses.append( + {"phase": "pre_compression", "response": response.content[0].text} + ) + log_file.write(f"USER: {session.prompts['precompression']}\n\n") log_file.write(f"ASSISTANT: {response.content[0].text}\n\n") log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") log_file.write("=" * 80 + "\n\n") - - session_logger.log_agent_response(response.content[0].text, "pre_compression_response") - session_logger.log_context_stats(f"{total_tokens}/{self.max_tokens}", f"Tokens: {total_tokens:,}") - + + session_logger.log_agent_response( + response.content[0].text, "pre_compression_response" + ) + session_logger.log_context_stats( + f"{total_tokens}/{self.max_tokens}", f"Tokens: {total_tokens:,}" + ) + milestones.append("pre_compression") stats["pre_compression_tokens"] = total_tokens - + # Phase 3: Deep-dive prompts (loop until token limit) self.logger.info("=" * 80) self.logger.info("PHASE 3: Deep-Dive Analysis") self.logger.info("=" * 80) - + deep_dive_count = 0 - - while deep_dive_count < self.max_turns and total_tokens < self.max_tokens * 0.9: + + while ( + deep_dive_count < self.max_turns + and 
total_tokens < self.max_tokens * 0.9 + ): deep_dive_count += 1 self.logger.info(f"Deep-dive iteration #{deep_dive_count}") - self.logger.info(f"Current tokens: {total_tokens:,} / {self.max_tokens:,} ({total_tokens/self.max_tokens*100:.1f}%)") + self.logger.info( + f"Current tokens: {total_tokens:,} / {self.max_tokens:,} ({total_tokens / self.max_tokens * 100:.1f}%)" + ) self.logger.info(f"Turn: {deep_dive_count} / {self.max_turns}") - - messages.append({"role": "user", "content": session.prompts["deepdive"]}) - + + messages.append( + {"role": "user", "content": session.prompts["deepdive"]} + ) + try: response = client.messages.create( - model=self.model, - max_tokens=4096, - messages=messages + model=self.model, max_tokens=4096, messages=messages + ) + + total_tokens += ( + response.usage.input_tokens + response.usage.output_tokens + ) + messages.append( + {"role": "assistant", "content": response.content[0].text} + ) + responses.append( + { + "phase": f"deep_dive_{deep_dive_count}", + "response": response.content[0].text, + } + ) + + log_file.write( + f"USER (Deep-dive #{deep_dive_count}): {session.prompts['deepdive']}\n\n" ) - - total_tokens += response.usage.input_tokens + response.usage.output_tokens - messages.append({"role": "assistant", "content": response.content[0].text}) - responses.append({"phase": f"deep_dive_{deep_dive_count}", "response": response.content[0].text}) - - log_file.write(f"USER (Deep-dive #{deep_dive_count}): {session.prompts['deepdive']}\n\n") log_file.write(f"ASSISTANT: {response.content[0].text}\n\n") - log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") + log_file.write( + f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n" + ) log_file.write("=" * 80 + "\n\n") - + stats[f"deep_dive_{deep_dive_count}_tokens"] = total_tokens - + except Exception as e: - self.logger.error(f"Deep-dive iteration {deep_dive_count} failed: {e}") + self.logger.error( + f"Deep-dive iteration {deep_dive_count} failed: {e}" + ) if "maximum context length" in str(e).lower(): self.logger.info("🔴 Hit Claude's context limit") hit_limit = True break raise - + milestones.append("deep_dive_complete") stats["deep_dive_iterations"] = deep_dive_count - + if total_tokens >= self.max_tokens * 0.9: hit_limit = True - self.logger.info(f"🔴 Reached token limit threshold: {total_tokens:,} / {self.max_tokens:,}") - + self.logger.info( + f"🔴 Reached token limit threshold: {total_tokens:,} / {self.max_tokens:,}" + ) + # Phase 4: Memory-only evaluation self.logger.info("=" * 80) self.logger.info("PHASE 4: Memory-Only Evaluation") self.logger.info("=" * 80) - - messages.append({"role": "user", "content": session.prompts["memory_only"]}) - + + messages.append( + {"role": "user", "content": session.prompts["memory_only"]} + ) + try: response = client.messages.create( - model=self.model, - max_tokens=4096, - messages=messages + model=self.model, max_tokens=4096, messages=messages + ) + + total_tokens += ( + response.usage.input_tokens + response.usage.output_tokens + ) + messages.append( + {"role": "assistant", "content": response.content[0].text} + ) + responses.append( + {"phase": "memory_only", "response": response.content[0].text} + ) + + log_file.write( + f"USER (Memory-only): {session.prompts['memory_only']}\n\n" ) - - total_tokens += response.usage.input_tokens + response.usage.output_tokens - messages.append({"role": "assistant", "content": response.content[0].text}) - responses.append({"phase": "memory_only", "response": response.content[0].text}) - - log_file.write(f"USER 
(Memory-only): {session.prompts['memory_only']}\n\n") log_file.write(f"ASSISTANT: {response.content[0].text}\n\n") - log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") + log_file.write( + f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n" + ) log_file.write("=" * 80 + "\n\n") - + milestones.append("memory_only") - + except Exception as e: self.logger.error(f"Memory-only phase failed: {e}") if "maximum context length" in str(e).lower(): self.logger.warning("Cannot continue - context limit exceeded") else: raise - + # Phase 5: Evaluator questions self.logger.info("=" * 80) self.logger.info("PHASE 5: Evaluator Questions") self.logger.info("=" * 80) - - messages.append({"role": "user", "content": session.prompts["evaluator_set"]}) - + + messages.append( + {"role": "user", "content": session.prompts["evaluator_set"]} + ) + try: response = client.messages.create( - model=self.model, - max_tokens=4096, - messages=messages + model=self.model, max_tokens=4096, messages=messages + ) + + total_tokens += ( + response.usage.input_tokens + response.usage.output_tokens + ) + messages.append( + {"role": "assistant", "content": response.content[0].text} + ) + responses.append( + {"phase": "evaluation", "response": response.content[0].text} + ) + + log_file.write( + f"USER (Evaluation): {session.prompts['evaluator_set']}\n\n" ) - - total_tokens += response.usage.input_tokens + response.usage.output_tokens - messages.append({"role": "assistant", "content": response.content[0].text}) - responses.append({"phase": "evaluation", "response": response.content[0].text}) - - log_file.write(f"USER (Evaluation): {session.prompts['evaluator_set']}\n\n") log_file.write(f"ASSISTANT: {response.content[0].text}\n\n") - log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") + log_file.write( + f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n" + ) log_file.write("=" * 80 + "\n\n") - + milestones.append("evaluation_complete") - + except Exception as e: self.logger.error(f"Evaluation phase failed: {e}") if "maximum context length" in str(e).lower(): self.logger.warning("Cannot continue - context limit exceeded") else: raise - + milestones.append("session_complete") - + except Exception as e: self.logger.error(f"Session error: {e}", exc_info=True) raise AgentExecutionError(self.name.value, f"Session error: {e}") - + # Prepare final statistics - stats.update({ - "total_tokens": str(total_tokens), - "hit_limit": str(hit_limit), - "max_tokens_configured": str(self.max_tokens), - "detection_method": "api_token_tracking", - }) - + stats.update( + { + "total_tokens": str(total_tokens), + "hit_limit": str(hit_limit), + "max_tokens_configured": str(self.max_tokens), + "detection_method": "api_token_tracking", + } + ) + return { "artifacts": {}, # Will be populated by caller "stats": stats, diff --git a/app/agents/gemini_agent.py b/app/agents/gemini_agent.py index d98e6db..d569183 100644 --- a/app/agents/gemini_agent.py +++ b/app/agents/gemini_agent.py @@ -1,46 +1,49 @@ """Google Gemini AI agent adapter using Google SDK.""" -import logging import asyncio +import logging from pathlib import Path -from typing import Dict, Any, Optional, List +from typing import Any import google.generativeai as genai -from app.domain.entities import AgentName from app.agents.base import ( - AgentAdapter, AgentSession, AgentCapabilities, - AgentMetadata, AgentExecutionError + AgentAdapter, + AgentCapabilities, + AgentExecutionError, + AgentMetadata, + AgentSession, ) from app.config import settings +from 
app.domain.entities import AgentName logger = logging.getLogger(__name__) class GeminiAgent(AgentAdapter): """Google Gemini AI agent adapter using Google SDK for direct API interaction.""" - + def __init__(self): super().__init__(AgentName.GEMINI, "gemini") # No binary needed - + if settings.google_api_key: genai.configure(api_key=settings.google_api_key) - + self.model_name = settings.gemini_model self.max_tokens = settings.max_context_tokens self.max_turns = settings.max_turns self.session_timeout = settings.agent_session_timeout - + def validate_installation(self) -> bool: """Validate that Google API key is configured.""" if not settings.google_api_key: self.logger.error("Google API key not configured") return False - + self.logger.info("Gemini (Google SDK) validation successful") return True - - def get_version_info(self) -> Dict[str, str]: + + def get_version_info(self) -> dict[str, str]: """Get Gemini version and system information.""" return { "model": self.model_name, @@ -48,274 +51,358 @@ def get_version_info(self) -> Dict[str, str]: "api_configured": str(bool(settings.google_api_key)), "sdk": "google-generativeai", } - + def _load_repo_files(self, repo_dir: Path, max_files: int = 50) -> str: """Load repository files into a context string with token limit.""" self.logger.info(f"Loading repository files from {repo_dir}") - + # Limit initial context to ~50K tokens (leaving room for conversation) MAX_CONTEXT_TOKENS = 50000 - + # Common code file extensions code_extensions = { - '.py', '.js', '.ts', '.tsx', '.jsx', '.java', '.cpp', '.c', '.h', - '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.cs', '.scala', - '.md', '.json', '.yaml', '.yml', '.toml', '.xml', '.html', '.css' + ".py", + ".js", + ".ts", + ".tsx", + ".jsx", + ".java", + ".cpp", + ".c", + ".h", + ".go", + ".rs", + ".rb", + ".php", + ".swift", + ".kt", + ".cs", + ".scala", + ".md", + ".json", + ".yaml", + ".yml", + ".toml", + ".xml", + ".html", + ".css", } - + files_content = [] file_count = 0 total_tokens = 0 - + try: - for file_path in repo_dir.rglob('*'): + for file_path in repo_dir.rglob("*"): if file_count >= max_files: break - + # Skip directories and hidden files - if file_path.is_dir() or file_path.name.startswith('.'): + if file_path.is_dir() or file_path.name.startswith("."): continue - + # Skip common non-code directories - if any(part in file_path.parts for part in ['.git', 'node_modules', '__pycache__', 'venv', 'dist', 'build']): + if any( + part in file_path.parts + for part in [ + ".git", + "node_modules", + "__pycache__", + "venv", + "dist", + "build", + ] + ): continue - + # Only include code files if file_path.suffix not in code_extensions: continue - + try: relative_path = file_path.relative_to(repo_dir) - content = file_path.read_text(encoding='utf-8', errors='ignore') - + content = file_path.read_text(encoding="utf-8", errors="ignore") + # Estimate tokens (rough: 1 token ~= 4 characters) file_tokens = len(content) // 4 - + # Stop if we would exceed token limit if total_tokens + file_tokens > MAX_CONTEXT_TOKENS: - self.logger.info(f"Stopping at {file_count} files to stay within {MAX_CONTEXT_TOKENS} token limit") + self.logger.info( + f"Stopping at {file_count} files to stay within {MAX_CONTEXT_TOKENS} token limit" + ) break - - files_content.append(f"### File: {relative_path}\n```{file_path.suffix[1:]}\n{content}\n```\n") + + files_content.append( + f"### File: {relative_path}\n```{file_path.suffix[1:]}\n{content}\n```\n" + ) file_count += 1 total_tokens += file_tokens - + except Exception as e: 
self.logger.warning(f"Could not read {file_path}: {e}") continue - + except Exception as e: self.logger.error(f"Error loading repository files: {e}") return f"Error loading repository: {e}" - - self.logger.info(f"Loaded {file_count} files (~{total_tokens} tokens) from repository") - + + self.logger.info( + f"Loaded {file_count} files (~{total_tokens} tokens) from repository" + ) + if not files_content: return "No code files found in repository." - - return f"# Repository Code\n\n" + "\n\n".join(files_content) - - def run_session(self, session: AgentSession) -> Dict[str, Any]: + + return "# Repository Code\n\n" + "\n\n".join(files_content) + + def run_session(self, session: AgentSession) -> dict[str, Any]: """Run complete Gemini session using Google SDK.""" self.setup_output_directory(session.output_dir) - + # Create transcript file transcript_path = session.output_dir / "transcript.txt" - + try: # Run async session result = asyncio.run(self._run_async_session(session, transcript_path)) - + # Add file paths to result result["artifacts"]["transcript"] = str(transcript_path) - + return result - + except Exception as e: self.logger.error(f"Gemini session failed: {e}", exc_info=True) return self.handle_error(e, session) - + async def _run_async_session( - self, - session: AgentSession, - transcript_path: Path - ) -> Dict[str, Any]: + self, session: AgentSession, transcript_path: Path + ) -> dict[str, Any]: """Run the async Gemini session using Google SDK.""" - + milestones = [] stats = {} responses = [] total_tokens = 0 hit_limit = False - + try: with open(transcript_path, "w", encoding="utf-8") as log_file: # Create model instance model = genai.GenerativeModel(self.model_name) - + # Phase 1: Load repository context self.logger.info("=" * 80) self.logger.info("PHASE 1: Loading Repository Context") self.logger.info("=" * 80) - + repo_context = self._load_repo_files(session.repo_dir) - log_file.write(f"Repository Context Loaded: {len(repo_context)} characters\n") + log_file.write( + f"Repository Context Loaded: {len(repo_context)} characters\n" + ) log_file.write("=" * 80 + "\n\n") - + # Start chat session chat = model.start_chat(history=[]) - + # Initial context loading response = await chat.send_message_async( f"{repo_context}\n\nThis is a code repository. Please analyze it and be ready to answer questions about it." 
) - - if hasattr(response, 'usage_metadata'): + + if hasattr(response, "usage_metadata"): total_tokens += response.usage_metadata.prompt_token_count total_tokens += response.usage_metadata.candidates_token_count - + log_file.write(f"ASSISTANT: {response.text}\n\n") log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") log_file.write("=" * 80 + "\n\n") - + milestones.append("repo_loaded") stats["initial_tokens"] = total_tokens - + # Phase 2: Pre-compression prompt self.logger.info("=" * 80) self.logger.info("PHASE 2: Pre-Compression Analysis") self.logger.info("=" * 80) - - response = await chat.send_message_async(session.prompts["precompression"]) - - if hasattr(response, 'usage_metadata'): + + response = await chat.send_message_async( + session.prompts["precompression"] + ) + + if hasattr(response, "usage_metadata"): total_tokens += response.usage_metadata.prompt_token_count total_tokens += response.usage_metadata.candidates_token_count - - responses.append({"phase": "pre_compression", "response": response.text}) - + + responses.append( + {"phase": "pre_compression", "response": response.text} + ) + log_file.write(f"USER: {session.prompts['precompression']}\n\n") log_file.write(f"ASSISTANT: {response.text}\n\n") log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") log_file.write("=" * 80 + "\n\n") - + milestones.append("pre_compression") stats["pre_compression_tokens"] = total_tokens - + # Phase 3: Deep-dive prompts (loop until token limit) self.logger.info("=" * 80) self.logger.info("PHASE 3: Deep-Dive Analysis") self.logger.info("=" * 80) - + deep_dive_count = 0 - - while deep_dive_count < self.max_turns and total_tokens < self.max_tokens * 0.9: + + while ( + deep_dive_count < self.max_turns + and total_tokens < self.max_tokens * 0.9 + ): deep_dive_count += 1 self.logger.info(f"Deep-dive iteration #{deep_dive_count}") - self.logger.info(f"Current tokens: {total_tokens:,} / {self.max_tokens:,} ({total_tokens/self.max_tokens*100:.1f}%)") + self.logger.info( + f"Current tokens: {total_tokens:,} / {self.max_tokens:,} ({total_tokens / self.max_tokens * 100:.1f}%)" + ) self.logger.info(f"Turn: {deep_dive_count} / {self.max_turns}") - + try: - response = await chat.send_message_async(session.prompts["deepdive"]) - - if hasattr(response, 'usage_metadata'): + response = await chat.send_message_async( + session.prompts["deepdive"] + ) + + if hasattr(response, "usage_metadata"): total_tokens += response.usage_metadata.prompt_token_count - total_tokens += response.usage_metadata.candidates_token_count - - responses.append({"phase": f"deep_dive_{deep_dive_count}", "response": response.text}) - - log_file.write(f"USER (Deep-dive #{deep_dive_count}): {session.prompts['deepdive']}\n\n") + total_tokens += ( + response.usage_metadata.candidates_token_count + ) + + responses.append( + { + "phase": f"deep_dive_{deep_dive_count}", + "response": response.text, + } + ) + + log_file.write( + f"USER (Deep-dive #{deep_dive_count}): {session.prompts['deepdive']}\n\n" + ) log_file.write(f"ASSISTANT: {response.text}\n\n") - log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") + log_file.write( + f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n" + ) log_file.write("=" * 80 + "\n\n") - + stats[f"deep_dive_{deep_dive_count}_tokens"] = total_tokens - + except Exception as e: - self.logger.error(f"Deep-dive iteration {deep_dive_count} failed: {e}") + self.logger.error( + f"Deep-dive iteration {deep_dive_count} failed: {e}" + ) if "token limit" in 
str(e).lower() or "quota" in str(e).lower(): self.logger.info("🔴 Hit Gemini's token/quota limit") hit_limit = True break raise - + milestones.append("deep_dive_complete") stats["deep_dive_iterations"] = deep_dive_count - + if total_tokens >= self.max_tokens * 0.9: hit_limit = True - self.logger.info(f"🔴 Reached token limit threshold: {total_tokens:,} / {self.max_tokens:,}") - + self.logger.info( + f"🔴 Reached token limit threshold: {total_tokens:,} / {self.max_tokens:,}" + ) + # Phase 4: Memory-only evaluation self.logger.info("=" * 80) self.logger.info("PHASE 4: Memory-Only Evaluation") self.logger.info("=" * 80) - + try: - response = await chat.send_message_async(session.prompts["memory_only"]) - - if hasattr(response, 'usage_metadata'): + response = await chat.send_message_async( + session.prompts["memory_only"] + ) + + if hasattr(response, "usage_metadata"): total_tokens += response.usage_metadata.prompt_token_count total_tokens += response.usage_metadata.candidates_token_count - - responses.append({"phase": "memory_only", "response": response.text}) - - log_file.write(f"USER (Memory-only): {session.prompts['memory_only']}\n\n") + + responses.append( + {"phase": "memory_only", "response": response.text} + ) + + log_file.write( + f"USER (Memory-only): {session.prompts['memory_only']}\n\n" + ) log_file.write(f"ASSISTANT: {response.text}\n\n") - log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") + log_file.write( + f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n" + ) log_file.write("=" * 80 + "\n\n") - + milestones.append("memory_only") - + except Exception as e: self.logger.error(f"Memory-only phase failed: {e}") if "token limit" in str(e).lower() or "quota" in str(e).lower(): - self.logger.warning("Cannot continue - token/quota limit exceeded") + self.logger.warning( + "Cannot continue - token/quota limit exceeded" + ) else: raise - + # Phase 5: Evaluator questions self.logger.info("=" * 80) self.logger.info("PHASE 5: Evaluator Questions") self.logger.info("=" * 80) - + try: - response = await chat.send_message_async(session.prompts["evaluator_set"]) - - if hasattr(response, 'usage_metadata'): + response = await chat.send_message_async( + session.prompts["evaluator_set"] + ) + + if hasattr(response, "usage_metadata"): total_tokens += response.usage_metadata.prompt_token_count total_tokens += response.usage_metadata.candidates_token_count - + responses.append({"phase": "evaluation", "response": response.text}) - - log_file.write(f"USER (Evaluation): {session.prompts['evaluator_set']}\n\n") + + log_file.write( + f"USER (Evaluation): {session.prompts['evaluator_set']}\n\n" + ) log_file.write(f"ASSISTANT: {response.text}\n\n") - log_file.write(f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n") + log_file.write( + f"Tokens used: {total_tokens:,} / {self.max_tokens:,}\n" + ) log_file.write("=" * 80 + "\n\n") - + milestones.append("evaluation_complete") - + except Exception as e: self.logger.error(f"Evaluation phase failed: {e}") if "token limit" in str(e).lower() or "quota" in str(e).lower(): - self.logger.warning("Cannot continue - token/quota limit exceeded") + self.logger.warning( + "Cannot continue - token/quota limit exceeded" + ) else: raise - + milestones.append("session_complete") - + except Exception as e: self.logger.error(f"Session error: {e}", exc_info=True) raise AgentExecutionError(self.name.value, f"Session error: {e}") - + # Prepare final statistics - stats.update({ - "total_tokens": str(total_tokens), - "hit_limit": str(hit_limit), - 
"max_tokens_configured": str(self.max_tokens), - "detection_method": "api_token_tracking", - }) - + stats.update( + { + "total_tokens": str(total_tokens), + "hit_limit": str(hit_limit), + "max_tokens_configured": str(self.max_tokens), + "detection_method": "api_token_tracking", + } + ) + return { "artifacts": {}, # Will be populated by caller "stats": stats, diff --git a/app/agents/iflow_agent.py b/app/agents/iflow_agent.py index 29c86ca..5070076 100644 --- a/app/agents/iflow_agent.py +++ b/app/agents/iflow_agent.py @@ -3,20 +3,29 @@ import asyncio import logging import subprocess -import re -import json -from pathlib import Path -from typing import Dict, Any, Optional, List from datetime import datetime +from pathlib import Path +from typing import Any -from iflow_sdk import IFlowClient, IFlowOptions, AssistantMessage, TaskFinishMessage, StopReason, ToolCallMessage, PlanMessage +from iflow_sdk import ( + AssistantMessage, + IFlowClient, + IFlowOptions, + PlanMessage, + StopReason, + TaskFinishMessage, + ToolCallMessage, +) -from app.domain.entities import AgentName from app.agents.base import ( - AgentAdapter, AgentSession, CompressionDetector, AgentCapabilities, - AgentMetadata, AgentNotFoundError, AgentExecutionError, AgentTimeoutError + AgentAdapter, + AgentCapabilities, + AgentExecutionError, + AgentMetadata, + AgentSession, ) from app.config import settings +from app.domain.entities import AgentName from app.services.task_logger import AgentSessionLogger logger = logging.getLogger(__name__) @@ -24,7 +33,7 @@ class IFlowAgent(AgentAdapter): """iFlow AI agent adapter using Python SDK for direct interaction.""" - + def __init__(self): super().__init__(AgentName.IFLOW, settings.iflow_bin) self.max_tokens = settings.max_context_tokens @@ -32,81 +41,87 @@ def __init__(self): self.session_timeout = settings.agent_session_timeout self.iflow_process = None self.port = 8090 - + def validate_installation(self) -> bool: """Validate that iFlow CLI is installed and working.""" if not self.check_binary_exists(): self.logger.error(f"iFlow binary not found: {self.binary_path}") return False - + try: result = subprocess.run( [self.binary_path, "--version"], + check=False, capture_output=True, text=True, - timeout=10 + timeout=10, ) - + if result.returncode != 0: self.logger.error(f"iFlow version check failed: {result.stderr}") return False - + self.logger.info(f"iFlow validation successful: {result.stdout.strip()}") return True - + except Exception as e: self.logger.error(f"iFlow validation failed: {e}") return False - - def get_version_info(self) -> Dict[str, str]: + + def get_version_info(self) -> dict[str, str]: """Get iFlow version and system information.""" version_info = { "binary_path": self.binary_path, "available": str(self.check_binary_exists()), } - + try: result = subprocess.run( [self.binary_path, "--version"], + check=False, capture_output=True, text=True, - timeout=10 + timeout=10, ) - + if result.returncode == 0: version_info["version"] = result.stdout.strip() else: version_info["version_error"] = result.stderr - + except Exception as e: version_info["version_error"] = str(e) - + return version_info - + def _start_iflow_process(self, repo_dir: Path) -> subprocess.Popen: """Start iFlow CLI process with ACP mode and token limit.""" - self.logger.info(f"Starting iFlow process in {repo_dir} with {self.max_tokens} token limit") - + self.logger.info( + f"Starting iFlow process in {repo_dir} with {self.max_tokens} token limit" + ) + # Start iFlow with experimental ACP mode and token limit 
process = subprocess.Popen( [ self.binary_path, "--experimental-acp", - "--port", str(self.port), - "--max-tokens", str(self.max_tokens), - "--yolo" # Auto-accept actions + "--port", + str(self.port), + "--max-tokens", + str(self.max_tokens), + "--yolo", # Auto-accept actions ], cwd=str(repo_dir), stdout=subprocess.PIPE, stderr=subprocess.PIPE, - text=True + text=True, ) - + self.iflow_process = process self.logger.info(f"iFlow process started with PID {process.pid}") return process - - def _get_context_stats(self) -> Optional[Dict[str, Any]]: + + def _get_context_stats(self) -> dict[str, Any] | None: """Get context statistics by running /stats model command via subprocess.""" try: # This is a workaround - we'd need to send this through the WebSocket @@ -115,34 +130,34 @@ def _get_context_stats(self) -> Optional[Dict[str, Any]]: except Exception as e: self.logger.warning(f"Failed to get context stats: {e}") return None - + async def _send_and_collect( self, client: IFlowClient, message: str, log_file, session_logger: AgentSessionLogger, - timeout: int = 300 - ) -> Dict[str, Any]: + timeout: int = 300, + ) -> dict[str, Any]: """Send a message and collect all responses until task finishes.""" self.logger.info(f"📤 Sending: {message[:100]}...") - + # Log to transcript file - log_file.write(f"\n{'='*80}\n") + log_file.write(f"\n{'=' * 80}\n") log_file.write(f"[{datetime.now().isoformat()}] USER: {message}\n") - log_file.write(f"{'='*80}\n") + log_file.write(f"{'=' * 80}\n") log_file.flush() - + # Log to UI stream session_logger.log_prompt_sent(message, "user_prompt") - + await client.send_message(message) - + response_text = [] tool_calls = [] plans = [] stop_reason = None - + try: async for msg in client.receive_messages(): if isinstance(msg, AssistantMessage): @@ -150,24 +165,28 @@ async def _send_and_collect( response_text.append(text) log_file.write(text) log_file.flush() - + elif isinstance(msg, ToolCallMessage): tool_info = { "status": msg.status, - "tool_name": msg.tool_name if hasattr(msg, 'tool_name') else None, - "label": msg.label if hasattr(msg, 'label') else None + "tool_name": msg.tool_name + if hasattr(msg, "tool_name") + else None, + "label": msg.label if hasattr(msg, "label") else None, } tool_calls.append(tool_info) log_file.write(f"\n[TOOL CALL: {tool_info}]\n") log_file.flush() - + elif isinstance(msg, PlanMessage): plan_info = { "entries": [ { "content": entry.content, "status": entry.status, - "priority": entry.priority if hasattr(entry, 'priority') else None + "priority": entry.priority + if hasattr(entry, "priority") + else None, } for entry in msg.entries ] @@ -175,55 +194,56 @@ async def _send_and_collect( plans.append(plan_info) log_file.write(f"\n[PLAN: {len(msg.entries)} entries]\n") log_file.flush() - + elif isinstance(msg, TaskFinishMessage): stop_reason = msg.stop_reason log_file.write(f"\n[TASK FINISHED: {stop_reason}]\n") log_file.flush() break - - except asyncio.TimeoutError: + + except TimeoutError: self.logger.warning(f"Timeout waiting for response after {timeout}s") stop_reason = "timeout" - + full_response = "".join(response_text) - + # Log response to UI stream session_logger.log_agent_response( - full_response, - f"response_{len(tool_calls)}_tools_{len(plans)}_plans" + full_response, f"response_{len(tool_calls)}_tools_{len(plans)}_plans" ) - + return { "response": full_response, "tool_calls": tool_calls, "plans": plans, "stop_reason": stop_reason, - "hit_token_limit": stop_reason == StopReason.MAX_TOKENS + "hit_token_limit": stop_reason == 
StopReason.MAX_TOKENS, } - - def run_session(self, session: AgentSession) -> Dict[str, Any]: + + def run_session(self, session: AgentSession) -> dict[str, Any]: """Run complete iFlow session using SDK.""" self.setup_output_directory(session.output_dir) - + # Create transcript file transcript_path = session.output_dir / "transcript.txt" - + # Create session logger for UI streaming session_logger = AgentSessionLogger(session.task_id, "iflow") - + try: # Run async session - result = asyncio.run(self._run_async_session(session, transcript_path, session_logger)) - + result = asyncio.run( + self._run_async_session(session, transcript_path, session_logger) + ) + # Add file paths to result result["artifacts"]["transcript"] = str(transcript_path) - + # Close session logger session_logger.close_session("completed", result.get("artifacts", {})) - + return result - + except Exception as e: self.logger.error(f"iFlow session failed: {e}", exc_info=True) session_logger.close_session("failed", {"error": str(e)}) @@ -238,155 +258,192 @@ def run_session(self, session: AgentSession) -> Dict[str, Any]: except subprocess.TimeoutExpired: self.logger.warning("Force killing iFlow process") self.iflow_process.kill() - + async def _run_async_session( self, session: AgentSession, transcript_path: Path, - session_logger: AgentSessionLogger - ) -> Dict[str, Any]: + session_logger: AgentSessionLogger, + ) -> dict[str, Any]: """Run the async iFlow session using SDK.""" - + milestones = [] stats = {} responses = [] total_tokens_estimate = 0 compression_detected = False - + # Start iFlow process self._start_iflow_process(session.repo_dir) - + # Wait for iFlow to start await asyncio.sleep(5) milestones.append("iflow_started") - + try: with open(transcript_path, "w", encoding="utf-8") as log_file: # Connect via SDK options = IFlowOptions( auto_start_process=False, url=f"ws://localhost:{self.port}/acp", - timeout=float(self.session_timeout) + timeout=float(self.session_timeout), ) - + self.logger.info(f"Connecting to iFlow at {options.url}") - + async with IFlowClient(options) as client: milestones.append("sdk_connected") - + # Phase 1: Initialize repository with /init self.logger.info("=" * 80) self.logger.info("PHASE 1: Repository Initialization") self.logger.info("=" * 80) - + init_result = await self._send_and_collect( client, "/init", log_file, session_logger, timeout=600 ) responses.append({"phase": "init", **init_result}) milestones.append("repo_initialized") - + # Estimate tokens (rough: 1 token ≈ 4 chars) total_tokens_estimate += len(init_result["response"]) // 4 stats["init_tokens_estimate"] = total_tokens_estimate - + # Phase 2: Pre-compression prompt self.logger.info("=" * 80) self.logger.info("PHASE 2: Pre-Compression Analysis") self.logger.info("=" * 80) - + pre_result = await self._send_and_collect( - client, session.prompts.get("pre") or session.prompts.get("precompression", ""), log_file, session_logger, timeout=300 + client, + session.prompts.get("pre") + or session.prompts.get("precompression", ""), + log_file, + session_logger, + timeout=300, ) responses.append({"phase": "pre_compression", **pre_result}) milestones.append("pre_compression") - + total_tokens_estimate += len(pre_result["response"]) // 4 stats["pre_compression_tokens_estimate"] = total_tokens_estimate - + # Phase 3: Deep-dive prompts (loop until token limit or compression) self.logger.info("=" * 80) self.logger.info("PHASE 3: Deep-Dive Analysis") self.logger.info("=" * 80) - + deep_dive_count = 0 - + while deep_dive_count < self.max_turns: 
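For context on the receive loop above: _send_and_collect drains typed SDK messages until a TaskFinishMessage arrives. A stripped-down sketch of that pattern, assuming the iflow_sdk types imported in this file; the exact attribute carrying assistant text is not visible in this hunk, so msg.text below is an assumption:

    from iflow_sdk import (
        AssistantMessage,
        IFlowClient,
        PlanMessage,
        TaskFinishMessage,
        ToolCallMessage,
    )

    async def collect_response(client: IFlowClient, prompt: str) -> dict:
        await client.send_message(prompt)
        text_parts, tool_calls, plan_updates = [], [], []
        stop_reason = None
        async for msg in client.receive_messages():
            if isinstance(msg, AssistantMessage):
                text_parts.append(msg.text)  # assumed attribute; see note above
            elif isinstance(msg, ToolCallMessage):
                tool_calls.append(msg.status)  # tool activity, as logged above
            elif isinstance(msg, PlanMessage):
                plan_updates.append(len(msg.entries))  # plan revisions
            elif isinstance(msg, TaskFinishMessage):
                stop_reason = msg.stop_reason  # terminal message ends the turn
                break
        return {"response": "".join(text_parts), "stop_reason": stop_reason}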
deep_dive_count += 1 self.logger.info(f"Deep-dive iteration #{deep_dive_count}") - + deep_result = await self._send_and_collect( - client, session.prompts.get("deep") or session.prompts.get("deepdive", ""), log_file, session_logger, timeout=300 + client, + session.prompts.get("deep") + or session.prompts.get("deepdive", ""), + log_file, + session_logger, + timeout=300, ) - responses.append({"phase": f"deep_dive_{deep_dive_count}", **deep_result}) - + responses.append( + {"phase": f"deep_dive_{deep_dive_count}", **deep_result} + ) + total_tokens_estimate += len(deep_result["response"]) // 4 - stats[f"deep_dive_{deep_dive_count}_tokens"] = total_tokens_estimate - + stats[f"deep_dive_{deep_dive_count}_tokens"] = ( + total_tokens_estimate + ) + # Log token usage hit_limit = deep_result["hit_token_limit"] - self.logger.info(f"📊 Token estimate: {total_tokens_estimate:,} / {self.max_tokens:,} ({total_tokens_estimate/self.max_tokens*100:.1f}%)") - self.logger.info(f"📊 Stop reason: {deep_result.get('stop_reason', 'unknown')} | Hit token limit: {hit_limit}") - self.logger.info(f"📊 Turn: {deep_dive_count} / {self.max_turns}") + self.logger.info( + f"📊 Token estimate: {total_tokens_estimate:,} / {self.max_tokens:,} ({total_tokens_estimate / self.max_tokens * 100:.1f}%)" + ) + self.logger.info( + f"📊 Stop reason: {deep_result.get('stop_reason', 'unknown')} | Hit token limit: {hit_limit}" + ) + self.logger.info( + f"📊 Turn: {deep_dive_count} / {self.max_turns}" + ) session_logger.log_context_stats( f"{total_tokens_estimate}/{self.max_tokens}", - f"Tokens: {total_tokens_estimate:,} ({total_tokens_estimate/self.max_tokens*100:.1f}%)" + f"Tokens: {total_tokens_estimate:,} ({total_tokens_estimate / self.max_tokens * 100:.1f}%)", ) - + # Check if we hit token limit (reported by iFlow SDK) if hit_limit: - self.logger.info("🔴 Token limit reached - iFlow will compress context on next interaction") + self.logger.info( + "🔴 Token limit reached - iFlow will compress context on next interaction" + ) compression_detected = True break - + # FALLBACK: Check if our estimate exceeds limit (even if iFlow doesn't report it) if total_tokens_estimate >= self.max_tokens: - self.logger.info(f"🔴 Token estimate exceeded limit ({total_tokens_estimate:,} >= {self.max_tokens:,}) - proceeding to memory-only evaluation") + self.logger.info( + f"🔴 Token estimate exceeded limit ({total_tokens_estimate:,} >= {self.max_tokens:,}) - proceeding to memory-only evaluation" + ) compression_detected = True break - + # Check if approaching limit (90% of max) if total_tokens_estimate > self.max_tokens * 0.9: - self.logger.info(f"⚠️ Approaching token limit: {total_tokens_estimate:,} / {self.max_tokens:,}") - + self.logger.info( + f"⚠️ Approaching token limit: {total_tokens_estimate:,} / {self.max_tokens:,}" + ) + milestones.append("deep_dive_complete") stats["deep_dive_iterations"] = deep_dive_count - + # Phase 4: Memory-only evaluation self.logger.info("=" * 80) self.logger.info("PHASE 4: Memory-Only Evaluation") self.logger.info("=" * 80) - + memory_result = await self._send_and_collect( - client, session.prompts["memory_only"], log_file, session_logger, timeout=300 + client, + session.prompts["memory_only"], + log_file, + session_logger, + timeout=300, ) responses.append({"phase": "memory_only", **memory_result}) milestones.append("memory_only") - + # Phase 5: Evaluator questions self.logger.info("=" * 80) self.logger.info("PHASE 5: Evaluator Questions") self.logger.info("=" * 80) - + eval_result = await self._send_and_collect( - client, 
session.prompts.get("eval") or session.prompts.get("evaluator_set", ""), log_file, session_logger, timeout=300 + client, + session.prompts.get("eval") + or session.prompts.get("evaluator_set", ""), + log_file, + session_logger, + timeout=300, ) responses.append({"phase": "evaluation", **eval_result}) milestones.append("evaluation_complete") - + milestones.append("session_complete") - + except Exception as e: self.logger.error(f"Session error: {e}", exc_info=True) raise AgentExecutionError(self.name.value, f"Session error: {e}") - + # Prepare final statistics - stats.update({ - "compression_detected": str(compression_detected), - "total_tokens_estimate": str(total_tokens_estimate), - "detection_method": "token_limit_based", - "max_tokens_configured": str(self.max_tokens), - }) - + stats.update( + { + "compression_detected": str(compression_detected), + "total_tokens_estimate": str(total_tokens_estimate), + "detection_method": "token_limit_based", + "max_tokens_configured": str(self.max_tokens), + } + ) + return { "artifacts": {}, # Will be populated by caller "stats": stats, diff --git a/app/agents/registry.py b/app/agents/registry.py index e883155..5c9915d 100644 --- a/app/agents/registry.py +++ b/app/agents/registry.py @@ -1,92 +1,88 @@ """Agent registry and plugin discovery system.""" import logging -from typing import Dict, List, Optional, Type -from app.domain.entities import AgentName from app.agents.base import AgentAdapter, AgentMetadata, AgentNotFoundError -from app.config import settings +from app.domain.entities import AgentName logger = logging.getLogger(__name__) class AgentRegistry: """Registry for managing AI agent adapters.""" - + def __init__(self): - self._agents: Dict[AgentName, Type[AgentAdapter]] = {} - self._metadata: Dict[AgentName, AgentMetadata] = {} - self._instances: Dict[AgentName, AgentAdapter] = {} - + self._agents: dict[AgentName, type[AgentAdapter]] = {} + self._metadata: dict[AgentName, AgentMetadata] = {} + self._instances: dict[AgentName, AgentAdapter] = {} + def register_agent( self, - agent_class: Type[AgentAdapter], + agent_class: type[AgentAdapter], metadata: AgentMetadata, - force: bool = False + force: bool = False, ) -> None: """Register an agent adapter class.""" agent_name = metadata.name - + if agent_name in self._agents and not force: logger.warning(f"Agent {agent_name.value} already registered, skipping") return - + self._agents[agent_name] = agent_class self._metadata[agent_name] = metadata - + logger.info(f"Registered agent: {agent_name.value}") - + def unregister_agent(self, agent_name: AgentName) -> None: """Unregister an agent adapter.""" if agent_name in self._agents: del self._agents[agent_name] del self._metadata[agent_name] - + # Clean up instance if exists if agent_name in self._instances: del self._instances[agent_name] - + logger.info(f"Unregistered agent: {agent_name.value}") - + def get_agent(self, agent_name: AgentName) -> AgentAdapter: """Get agent adapter instance (singleton pattern).""" if agent_name not in self._agents: raise AgentNotFoundError( - agent_name.value, - f"Agent {agent_name.value} not registered" + agent_name.value, f"Agent {agent_name.value} not registered" ) - + # Return cached instance if exists if agent_name in self._instances: return self._instances[agent_name] - + # Create new instance agent_class = self._agents[agent_name] try: instance = agent_class() - + # Validate installation if not instance.validate_installation(): raise AgentNotFoundError( agent_name.value, - f"Agent {agent_name.value} installation 
validation failed" + f"Agent {agent_name.value} installation validation failed", ) - + self._instances[agent_name] = instance logger.info(f"Created agent instance: {agent_name.value}") return instance - + except Exception as e: logger.error(f"Failed to create agent {agent_name.value}: {e}") raise AgentNotFoundError( - agent_name.value, - f"Failed to create agent instance: {e}" + agent_name.value, f"Failed to create agent instance: {e}" ) - - def get_available_agents(self) -> List[AgentName]: + + def get_available_agents(self) -> list[AgentName]: """Get list of available (registered and validated) agents.""" available = [] - + for agent_name in self._agents: try: agent = self.get_agent(agent_name) @@ -94,21 +90,21 @@ def get_available_agents(self) -> List[AgentName]: available.append(agent_name) except Exception as e: logger.warning(f"Agent {agent_name.value} not available: {e}") - + return available - - def get_agent_metadata(self, agent_name: AgentName) -> Optional[AgentMetadata]: + + def get_agent_metadata(self, agent_name: AgentName) -> AgentMetadata | None: """Get agent metadata.""" return self._metadata.get(agent_name) - - def get_all_metadata(self) -> Dict[AgentName, AgentMetadata]: + + def get_all_metadata(self) -> dict[AgentName, AgentMetadata]: """Get metadata for all registered agents.""" return self._metadata.copy() - - def validate_agents(self, agent_names: List[AgentName]) -> Dict[AgentName, bool]: + + def validate_agents(self, agent_names: list[AgentName]) -> dict[AgentName, bool]: """Validate multiple agents and return validation results.""" results = {} - + for agent_name in agent_names: try: agent = self.get_agent(agent_name) @@ -116,39 +112,44 @@ def validate_agents(self, agent_names: List[AgentName]) -> Dict[AgentName, bool] except Exception as e: logger.error(f"Validation failed for {agent_name.value}: {e}") results[agent_name] = False - + return results - + def auto_discover_agents(self) -> None: """Auto-discover and register agent adapters.""" logger.info("Starting agent auto-discovery...") - + # Import agent implementations try: - from app.agents.iflow_agent import IFlowAgent, IFLOW_METADATA + from app.agents.iflow_agent import IFLOW_METADATA, IFlowAgent + self.register_agent(IFlowAgent, IFLOW_METADATA) except ImportError as e: logger.warning(f"Failed to import iFlow agent: {e}") - + try: - from app.agents.claude_agent import ClaudeAgent, CLAUDE_METADATA + from app.agents.claude_agent import CLAUDE_METADATA, ClaudeAgent + self.register_agent(ClaudeAgent, CLAUDE_METADATA) except ImportError as e: logger.warning(f"Failed to import Claude agent: {e}") - + try: - from app.agents.gemini_agent import GeminiAgent, GEMINI_METADATA + from app.agents.gemini_agent import GEMINI_METADATA, GeminiAgent + self.register_agent(GeminiAgent, GEMINI_METADATA) except ImportError as e: logger.warning(f"Failed to import Gemini agent: {e}") - + available = self.get_available_agents() - logger.info(f"Auto-discovery complete. Available agents: {[a.value for a in available]}") - - def health_check(self) -> Dict[str, Dict[str, any]]: + logger.info( + f"Auto-discovery complete. 
Available agents: {[a.value for a in available]}" + ) + + def health_check(self) -> dict[str, dict[str, any]]: """Perform health check on all registered agents.""" health_data = {} - + for agent_name, agent_class in self._agents.items(): agent_health = { "registered": True, @@ -156,18 +157,18 @@ def health_check(self) -> Dict[str, Dict[str, any]]: "version_info": {}, "error": None, } - + try: agent = self.get_agent(agent_name) agent_health["available"] = agent.validate_installation() if agent_health["available"]: agent_health["version_info"] = agent.get_version_info() - + except Exception as e: agent_health["error"] = str(e) - + health_data[agent_name.value] = agent_health - + return health_data @@ -181,9 +182,7 @@ def get_agent_registry() -> AgentRegistry: def register_agent( - agent_class: Type[AgentAdapter], - metadata: AgentMetadata, - force: bool = False + agent_class: type[AgentAdapter], metadata: AgentMetadata, force: bool = False ) -> None: """Convenience function to register an agent.""" registry.register_agent(agent_class, metadata, force) @@ -194,11 +193,11 @@ def get_agent(agent_name: AgentName) -> AgentAdapter: return registry.get_agent(agent_name) -def validate_agent_list(agent_names: List[AgentName]) -> bool: +def validate_agent_list(agent_names: list[AgentName]) -> bool: """Validate that all agents in the list are available.""" if not agent_names: return False - + validation_results = registry.validate_agents(agent_names) return all(validation_results.values()) @@ -206,10 +205,12 @@ def validate_agent_list(agent_names: List[AgentName]) -> bool: def initialize_agent_registry() -> None: """Initialize the agent registry with auto-discovery.""" registry.auto_discover_agents() - + # Log available agents available = registry.get_available_agents() if available: - logger.info(f"Initialized agent registry with agents: {[a.value for a in available]}") + logger.info( + f"Initialized agent registry with agents: {[a.value for a in available]}" + ) else: logger.warning("No agents are available after initialization") diff --git a/app/config.py b/app/config.py index 96c20d5..5484e4a 100644 --- a/app/config.py +++ b/app/config.py @@ -1,7 +1,7 @@ """Application configuration using Pydantic Settings.""" from pathlib import Path -from typing import List, Optional + from pydantic import Field, PostgresDsn, RedisDsn from pydantic_settings import BaseSettings, SettingsConfigDict @@ -11,94 +11,115 @@ class Settings(BaseSettings): """Application settings with environment variable support.""" - + model_config = SettingsConfigDict( env_file=str(PROJECT_ROOT / ".env"), env_file_encoding="utf-8", case_sensitive=False, extra="ignore", - env_ignore_empty=False + env_ignore_empty=False, ) - + # Application app_name: str = Field(default="Memory-Break Orchestrator", alias="APP_NAME") app_version: str = Field(default="0.1.0", alias="APP_VERSION") debug: bool = Field(default=False, alias="DEBUG") - + # API Server host: str = Field(default="127.0.0.1", alias="HOST") port: int = Field(default=8000, alias="PORT") - + # Database database_url: PostgresDsn = Field( default="postgresql://user:password@localhost:5432/memory_break_db", - alias="DATABASE_URL" + alias="DATABASE_URL", ) - + # Redis - redis_url: RedisDsn = Field( - default="redis://localhost:6379/0", - alias="REDIS_URL" - ) - + redis_url: RedisDsn = Field(default="redis://localhost:6379/0", alias="REDIS_URL") + # File Storage run_root: str = Field(default="storage", alias="RUN_ROOT") max_files_per_task: int = Field(default=50, alias="MAX_FILES_PER_TASK") 
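A note on the Settings pattern in this hunk: each field reads from the environment (or .env) via its alias, case-insensitively. A minimal sketch of the same pydantic-settings mechanism; DemoSettings is an illustrative stand-in, not a class from this patch:

    import os

    from pydantic import Field
    from pydantic_settings import BaseSettings, SettingsConfigDict

    class DemoSettings(BaseSettings):
        model_config = SettingsConfigDict(case_sensitive=False, extra="ignore")
        # The alias is the environment variable name, as with MAX_FILES_PER_TASK above.
        max_files_per_task: int = Field(default=50, alias="MAX_FILES_PER_TASK")

    os.environ["MAX_FILES_PER_TASK"] = "10"
    print(DemoSettings().max_files_per_task)  # -> 10 (env overrides the default)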
- + # Agent CLIs iflow_bin: str = Field(default="iflow", alias="IFLOW_BIN") claude_bin: str = Field(default="claude", alias="CLAUDE_BIN") gemini_bin: str = Field(default="gemini", alias="GEMINI_BIN") - + # iFlow Configuration - iflow_api_key: Optional[str] = Field(default=None, alias="IFLOW_API_KEY") - iflow_base_url: str = Field(default="https://apis.iflow.cn/v1", alias="IFLOW_BASE_URL") + iflow_api_key: str | None = Field(default=None, alias="IFLOW_API_KEY") + iflow_base_url: str = Field( + default="https://apis.iflow.cn/v1", alias="IFLOW_BASE_URL" + ) iflow_model_name: str = Field(default="qwen3-coder-plus", alias="IFLOW_MODEL_NAME") - + # Claude Configuration - claude_model: str = Field(default="claude-sonnet-4-5-20250929", alias="CLAUDE_MODEL") - + claude_model: str = Field( + default="claude-sonnet-4-5-20250929", alias="CLAUDE_MODEL" + ) + # Gemini Configuration gemini_model: str = Field(default="gemini-2.5-pro", alias="GEMINI_MODEL") - + # API Keys (for LLM Judge) - openai_api_key: Optional[str] = Field(default=None, alias="OPENAI_API_KEY") - anthropic_api_key: Optional[str] = Field(default=None, alias="ANTHROPIC_API_KEY") - google_api_key: Optional[str] = Field(default=None, alias="GOOGLE_API_KEY") - + openai_api_key: str | None = Field(default=None, alias="OPENAI_API_KEY") + anthropic_api_key: str | None = Field(default=None, alias="ANTHROPIC_API_KEY") + google_api_key: str | None = Field(default=None, alias="GOOGLE_API_KEY") + # Security - allowed_origins: List[str] = Field( + allowed_origins: list[str] = Field( default=["http://localhost:3000", "http://localhost:8000"], - alias="ALLOWED_ORIGINS" + alias="ALLOWED_ORIGINS", ) - + # Task Processing - task_timeout_seconds: int = Field(default=7200, alias="TASK_TIMEOUT_SECONDS") # 2 hours - agent_session_timeout: int = Field(default=3600, alias="AGENT_SESSION_TIMEOUT") # 1 hour - + task_timeout_seconds: int = Field( + default=7200, alias="TASK_TIMEOUT_SECONDS" + ) # 2 hours + agent_session_timeout: int = Field( + default=3600, alias="AGENT_SESSION_TIMEOUT" + ) # 1 hour + # Agent Token Limits (for fair comparison) - max_context_tokens: int = Field(default=200000, alias="MAX_CONTEXT_TOKENS") # 200K tokens for all agents - max_turns: int = Field(default=100, alias="MAX_TURNS") # Maximum deep-dive iterations - + max_context_tokens: int = Field( + default=200000, alias="MAX_CONTEXT_TOKENS" + ) # 200K tokens for all agents + max_turns: int = Field( + default=100, alias="MAX_TURNS" + ) # Maximum deep-dive iterations + # Compression Detection - compression_threshold_low: int = Field(default=30, alias="COMPRESSION_THRESHOLD_LOW") - compression_jump_threshold: int = Field(default=30, alias="COMPRESSION_JUMP_THRESHOLD") - + compression_threshold_low: int = Field( + default=30, alias="COMPRESSION_THRESHOLD_LOW" + ) + compression_jump_threshold: int = Field( + default=30, alias="COMPRESSION_JUMP_THRESHOLD" + ) + # Judge Configuration default_judge: str = Field(default="llm", alias="DEFAULT_JUDGE") # heuristic | llm judge_model: str = Field(default="gpt-4o", alias="JUDGE_MODEL") - + # Prompt Generation Configuration - use_gpt_prompts: bool = Field(default=True, alias="USE_GPT_PROMPTS") # Use GPT for prompt generation - prompt_model: str = Field(default="gpt-4o", alias="PROMPT_MODEL") # Use GPT-4o which supports temperature - prompt_temperature: float = Field(default=1.0, alias="PROMPT_TEMPERATURE") # Use default temperature for compatibility - prompt_max_tokens: int = Field(default=4000, alias="PROMPT_MAX_TOKENS") # Max tokens per prompt - + 
use_gpt_prompts: bool = Field( + default=True, alias="USE_GPT_PROMPTS" + ) # Use GPT for prompt generation + prompt_model: str = Field( + default="gpt-4o", alias="PROMPT_MODEL" + ) # Use GPT-4o which supports temperature + prompt_temperature: float = Field( + default=1.0, alias="PROMPT_TEMPERATURE" + ) # Use default temperature for compatibility + prompt_max_tokens: int = Field( + default=4000, alias="PROMPT_MAX_TOKENS" + ) # Max tokens per prompt + @property def database_url_str(self) -> str: """Get database URL as string.""" return str(self.database_url) - + @property def redis_url_str(self) -> str: """Get Redis URL as string.""" diff --git a/app/domain/entities.py b/app/domain/entities.py index 990730a..bf6812b 100644 --- a/app/domain/entities.py +++ b/app/domain/entities.py @@ -2,7 +2,6 @@ from datetime import datetime from enum import Enum -from typing import Dict, List, Optional from uuid import UUID, uuid4 from pydantic import BaseModel, Field, HttpUrl @@ -10,6 +9,7 @@ class TaskStatus(str, Enum): """Task execution status.""" + QUEUED = "queued" RUNNING = "running" JUDGING = "judging" @@ -19,6 +19,7 @@ class TaskStatus(str, Enum): class AgentRunStatus(str, Enum): """Agent run execution status.""" + QUEUED = "queued" RUNNING = "running" MEMORY_ONLY = "memory_only" @@ -29,6 +30,7 @@ class AgentRunStatus(str, Enum): class AgentName(str, Enum): """Supported AI agents.""" + IFLOW = "iflow" CLAUDE = "claude" GEMINI = "gemini" @@ -36,6 +38,7 @@ class AgentName(str, Enum): class RubricDimension(str, Enum): """Evaluation rubric dimensions.""" + AR = "AR" # Accurate Retrieval - Recall TTL = "TTL" # Test-Time Learning - Adapt LRU = "LRU" # Long-Range Understanding - Connect @@ -44,44 +47,44 @@ class RubricDimension(str, Enum): class Task(BaseModel): """Memory-break evaluation task.""" - + id: UUID = Field(default_factory=uuid4) pr_url: HttpUrl repo: str pr_number: int - agents: List[AgentName] - rubric: List[RubricDimension] = Field(default_factory=lambda: list(RubricDimension)) + agents: list[AgentName] + rubric: list[RubricDimension] = Field(default_factory=lambda: list(RubricDimension)) status: TaskStatus = TaskStatus.QUEUED max_files: int = Field(default=50, ge=1, le=1000) - + # Timestamps created_at: datetime = Field(default_factory=datetime.utcnow) updated_at: datetime = Field(default_factory=datetime.utcnow) - started_at: Optional[datetime] = None - completed_at: Optional[datetime] = None - + started_at: datetime | None = None + completed_at: datetime | None = None + # Metadata - changed_files: List[str] = Field(default_factory=list) - prompt_hash: Optional[str] = None - error_message: Optional[str] = None - + changed_files: list[str] = Field(default_factory=list) + prompt_hash: str | None = None + error_message: str | None = None + def mark_started(self) -> None: """Mark task as started.""" self.status = TaskStatus.RUNNING self.started_at = datetime.utcnow() self.updated_at = datetime.utcnow() - + def mark_judging(self) -> None: """Mark task as in judging phase.""" self.status = TaskStatus.JUDGING self.updated_at = datetime.utcnow() - + def mark_completed(self) -> None: """Mark task as completed.""" self.status = TaskStatus.DONE self.completed_at = datetime.utcnow() self.updated_at = datetime.utcnow() - + def mark_error(self, error_message: str) -> None: """Mark task as errored.""" self.status = TaskStatus.ERROR @@ -91,66 +94,66 @@ def mark_error(self, error_message: str) -> None: class AgentRun(BaseModel): """Individual agent execution within a task.""" - + id: UUID = 
Field(default_factory=uuid4) task_id: UUID agent: AgentName status: AgentRunStatus = AgentRunStatus.QUEUED - + # Execution milestones - milestones: Dict[str, datetime] = Field(default_factory=dict) - + milestones: dict[str, datetime] = Field(default_factory=dict) + # Artifacts and outputs - artifacts: Dict[str, str] = Field(default_factory=dict) # name -> file path - stats: Dict[str, str] = Field(default_factory=dict) # execution statistics - + artifacts: dict[str, str] = Field(default_factory=dict) # name -> file path + stats: dict[str, str] = Field(default_factory=dict) # execution statistics + # Timestamps created_at: datetime = Field(default_factory=datetime.utcnow) updated_at: datetime = Field(default_factory=datetime.utcnow) - started_at: Optional[datetime] = None - completed_at: Optional[datetime] = None - + started_at: datetime | None = None + completed_at: datetime | None = None + # Error handling - error_message: Optional[str] = None + error_message: str | None = None retry_count: int = 0 - + def add_milestone(self, name: str) -> None: """Add execution milestone.""" self.milestones[name] = datetime.utcnow() self.updated_at = datetime.utcnow() - + def add_artifact(self, name: str, file_path: str) -> None: """Add artifact file path.""" self.artifacts[name] = file_path self.updated_at = datetime.utcnow() - + def add_stat(self, key: str, value: str) -> None: """Add execution statistic.""" self.stats[key] = value self.updated_at = datetime.utcnow() - + def mark_started(self) -> None: """Mark agent run as started.""" self.status = AgentRunStatus.RUNNING self.started_at = datetime.utcnow() self.add_milestone("started") - + def mark_memory_only(self) -> None: """Mark agent run as entered memory-only mode.""" self.status = AgentRunStatus.MEMORY_ONLY self.add_milestone("memory_only") - + def mark_evaluating(self) -> None: """Mark agent run as in evaluation phase.""" self.status = AgentRunStatus.EVALUATING self.add_milestone("evaluating") - + def mark_completed(self) -> None: """Mark agent run as completed.""" self.status = AgentRunStatus.DONE self.completed_at = datetime.utcnow() self.add_milestone("completed") - + def mark_error(self, error_message: str) -> None: """Mark agent run as errored.""" self.status = AgentRunStatus.ERROR @@ -160,36 +163,36 @@ def mark_error(self, error_message: str) -> None: class Score(BaseModel): """Evaluation score for an agent run.""" - + id: UUID = Field(default_factory=uuid4) agent_run_id: UUID task_id: UUID agent: AgentName - + # Scoring - scores: Dict[RubricDimension, float] = Field(default_factory=dict) + scores: dict[RubricDimension, float] = Field(default_factory=dict) overall_score: float = 0.0 passed: bool = False - + # Judge information judge_type: str # "heuristic" or "llm" - judge_model: Optional[str] = None + judge_model: str | None = None rationale: str = "" - + # A/B comparison data - pre_compression_answers: Dict[str, str] = Field(default_factory=dict) - post_compression_answers: Dict[str, str] = Field(default_factory=dict) - + pre_compression_answers: dict[str, str] = Field(default_factory=dict) + post_compression_answers: dict[str, str] = Field(default_factory=dict) + # Timestamps created_at: datetime = Field(default_factory=datetime.utcnow) - + def calculate_overall_score(self) -> None: """Calculate overall score and pass/fail status.""" if not self.scores: self.overall_score = 0.0 self.passed = False return - + self.overall_score = sum(self.scores.values()) / len(self.scores) # Pass if >= 3/4 dimensions score >= 0.5 (or similar threshold) 
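To make the scoring semantics above concrete, a small worked example; only three RubricDimension members (AR, TTL, LRU) are visible in this patch and the pass rule is stated only in the comment, so the `passed` outcome is left hedged:

    from uuid import uuid4

    from app.domain.entities import AgentName, RubricDimension, Score

    score = Score(
        agent_run_id=uuid4(),
        task_id=uuid4(),
        agent=AgentName.CLAUDE,
        judge_type="llm",
        scores={
            RubricDimension.AR: 0.8,
            RubricDimension.TTL: 0.6,
            RubricDimension.LRU: 0.4,
        },
    )
    score.calculate_overall_score()
    print(score.overall_score)  # mean of the values: (0.8 + 0.6 + 0.4) / 3 = 0.6
    # `passed` depends on the threshold rule ("3/4 dimensions >= 0.5 or similar");
    # here two of the three dimensions shown in this patch clear 0.5.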
passing_scores = sum(1 for score in self.scores.values() if score >= 0.5) @@ -198,18 +201,18 @@ def calculate_overall_score(self) -> None: class Artifact(BaseModel): """File artifact generated during agent execution.""" - + id: UUID = Field(default_factory=uuid4) agent_run_id: UUID task_id: UUID agent: AgentName - + # File information name: str file_path: str file_type: str # "transcript", "export", "scores", etc. - size_bytes: Optional[int] = None - + size_bytes: int | None = None + # Metadata created_at: datetime = Field(default_factory=datetime.utcnow) - checksum: Optional[str] = None + checksum: str | None = None diff --git a/app/infrastructure/database.py b/app/infrastructure/database.py index c9beb27..15acc56 100644 --- a/app/infrastructure/database.py +++ b/app/infrastructure/database.py @@ -1,123 +1,149 @@ """Database configuration and models using SQLModel.""" from datetime import datetime -from typing import Dict, List, Optional from uuid import UUID, uuid4 -from sqlmodel import JSON, Column, Field, SQLModel, create_engine, Session -from sqlalchemy import String, Text, Integer, Float, Boolean, DateTime, UUID as SQLUUID +from sqlalchemy import UUID as SQLUUID +from sqlalchemy import Boolean, DateTime, Float, Integer, String, Text +from sqlmodel import JSON, Column, Field, Session, SQLModel, create_engine from app.config import settings -from app.domain.entities import ( - TaskStatus, AgentRunStatus, AgentName, RubricDimension -) +from app.domain.entities import AgentName, AgentRunStatus, TaskStatus class TaskDB(SQLModel, table=True): """Database model for Task.""" - + __tablename__ = "tasks" - + # Primary fields - id: UUID = Field(default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True)) + id: UUID = Field( + default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True) + ) pr_url: str = Field(sa_column=Column(String(512), nullable=False)) repo: str = Field(sa_column=Column(String(256), nullable=False)) pr_number: int = Field(sa_column=Column(Integer, nullable=False)) status: TaskStatus = Field(sa_column=Column(String(50), nullable=False)) max_files: int = Field(default=50, sa_column=Column(Integer, nullable=False)) - + # JSON fields - agents: List[str] = Field(default_factory=list, sa_column=Column(JSON)) - rubric: List[str] = Field(default_factory=list, sa_column=Column(JSON)) - changed_files: List[str] = Field(default_factory=list, sa_column=Column(JSON)) - + agents: list[str] = Field(default_factory=list, sa_column=Column(JSON)) + rubric: list[str] = Field(default_factory=list, sa_column=Column(JSON)) + changed_files: list[str] = Field(default_factory=list, sa_column=Column(JSON)) + # Metadata - prompt_hash: Optional[str] = Field(default=None, sa_column=Column(String(64))) - error_message: Optional[str] = Field(default=None, sa_column=Column(Text)) - + prompt_hash: str | None = Field(default=None, sa_column=Column(String(64))) + error_message: str | None = Field(default=None, sa_column=Column(Text)) + # Timestamps - created_at: datetime = Field(default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False)) - updated_at: datetime = Field(default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False)) - started_at: Optional[datetime] = Field(default=None, sa_column=Column(DateTime)) - completed_at: Optional[datetime] = Field(default=None, sa_column=Column(DateTime)) + created_at: datetime = Field( + default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False) + ) + updated_at: datetime = Field( 
+ default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False) + ) + started_at: datetime | None = Field(default=None, sa_column=Column(DateTime)) + completed_at: datetime | None = Field(default=None, sa_column=Column(DateTime)) class AgentRunDB(SQLModel, table=True): """Database model for AgentRun.""" - + __tablename__ = "agent_runs" - + # Primary fields - id: UUID = Field(default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True)) + id: UUID = Field( + default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True) + ) task_id: UUID = Field(sa_column=Column(SQLUUID(as_uuid=True), nullable=False)) agent: AgentName = Field(sa_column=Column(String(50), nullable=False)) status: AgentRunStatus = Field(sa_column=Column(String(50), nullable=False)) - + # JSON fields - milestones: Dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON)) # datetime as ISO string - artifacts: Dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON)) - stats: Dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON)) - + milestones: dict[str, str] = Field( + default_factory=dict, sa_column=Column(JSON) + ) # datetime as ISO string + artifacts: dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON)) + stats: dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON)) + # Error handling - error_message: Optional[str] = Field(default=None, sa_column=Column(Text)) + error_message: str | None = Field(default=None, sa_column=Column(Text)) retry_count: int = Field(default=0, sa_column=Column(Integer, nullable=False)) - + # Timestamps - created_at: datetime = Field(default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False)) - updated_at: datetime = Field(default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False)) - started_at: Optional[datetime] = Field(default=None, sa_column=Column(DateTime)) - completed_at: Optional[datetime] = Field(default=None, sa_column=Column(DateTime)) + created_at: datetime = Field( + default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False) + ) + updated_at: datetime = Field( + default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False) + ) + started_at: datetime | None = Field(default=None, sa_column=Column(DateTime)) + completed_at: datetime | None = Field(default=None, sa_column=Column(DateTime)) class ScoreDB(SQLModel, table=True): """Database model for Score.""" - + __tablename__ = "scores" - + # Primary fields - id: UUID = Field(default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True)) + id: UUID = Field( + default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True) + ) agent_run_id: UUID = Field(sa_column=Column(SQLUUID(as_uuid=True), nullable=False)) task_id: UUID = Field(sa_column=Column(SQLUUID(as_uuid=True), nullable=False)) agent: AgentName = Field(sa_column=Column(String(50), nullable=False)) - + # Scoring - scores: Dict[str, float] = Field(default_factory=dict, sa_column=Column(JSON)) # dimension -> score + scores: dict[str, float] = Field( + default_factory=dict, sa_column=Column(JSON) + ) # dimension -> score overall_score: float = Field(default=0.0, sa_column=Column(Float, nullable=False)) passed: bool = Field(default=False, sa_column=Column(Boolean, nullable=False)) - + # Judge information judge_type: str = Field(sa_column=Column(String(50), nullable=False)) - judge_model: Optional[str] = Field(default=None, sa_column=Column(String(100))) + judge_model: str | 
None = Field(default=None, sa_column=Column(String(100))) rationale: str = Field(default="", sa_column=Column(Text)) - + # A/B comparison data - pre_compression_answers: Dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON)) - post_compression_answers: Dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON)) - + pre_compression_answers: dict[str, str] = Field( + default_factory=dict, sa_column=Column(JSON) + ) + post_compression_answers: dict[str, str] = Field( + default_factory=dict, sa_column=Column(JSON) + ) + # Timestamps - created_at: datetime = Field(default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False)) + created_at: datetime = Field( + default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False) + ) class ArtifactDB(SQLModel, table=True): """Database model for Artifact.""" - + __tablename__ = "artifacts" - + # Primary fields - id: UUID = Field(default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True)) + id: UUID = Field( + default_factory=uuid4, sa_column=Column(SQLUUID(as_uuid=True), primary_key=True) + ) agent_run_id: UUID = Field(sa_column=Column(SQLUUID(as_uuid=True), nullable=False)) task_id: UUID = Field(sa_column=Column(SQLUUID(as_uuid=True), nullable=False)) agent: AgentName = Field(sa_column=Column(String(50), nullable=False)) - + # File information name: str = Field(sa_column=Column(String(255), nullable=False)) file_path: str = Field(sa_column=Column(String(1024), nullable=False)) file_type: str = Field(sa_column=Column(String(50), nullable=False)) - size_bytes: Optional[int] = Field(default=None, sa_column=Column(Integer)) - checksum: Optional[str] = Field(default=None, sa_column=Column(String(64))) - + size_bytes: int | None = Field(default=None, sa_column=Column(Integer)) + checksum: str | None = Field(default=None, sa_column=Column(String(64))) + # Timestamps - created_at: datetime = Field(default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False)) + created_at: datetime = Field( + default_factory=datetime.utcnow, sa_column=Column(DateTime, nullable=False) + ) # Database engine and session management @@ -142,10 +168,10 @@ def get_session() -> Session: class DatabaseManager: """Database operations manager.""" - + def __init__(self, session: Session): self.session = session - + def create_task(self, task_data: dict) -> TaskDB: """Create a new task.""" task = TaskDB(**task_data) @@ -153,26 +179,26 @@ def create_task(self, task_data: dict) -> TaskDB: self.session.commit() self.session.refresh(task) return task - - def get_task(self, task_id: UUID) -> Optional[TaskDB]: + + def get_task(self, task_id: UUID) -> TaskDB | None: """Get task by ID.""" return self.session.get(TaskDB, task_id) - - def update_task(self, task_id: UUID, updates: dict) -> Optional[TaskDB]: + + def update_task(self, task_id: UUID, updates: dict) -> TaskDB | None: """Update task.""" task = self.session.get(TaskDB, task_id) if not task: return None - + for key, value in updates.items(): if hasattr(task, key): setattr(task, key, value) - + task.updated_at = datetime.utcnow() self.session.commit() self.session.refresh(task) return task - + def create_agent_run(self, agent_run_data: dict) -> AgentRunDB: """Create a new agent run.""" agent_run = AgentRunDB(**agent_run_data) @@ -180,32 +206,33 @@ def create_agent_run(self, agent_run_data: dict) -> AgentRunDB: self.session.commit() self.session.refresh(agent_run) return agent_run - - def get_agent_run(self, agent_run_id: UUID) -> Optional[AgentRunDB]: + + 
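A hypothetical usage sketch for the DatabaseManager methods above; the Session/engine wiring assumes the sqlmodel helpers already used in this module, and all field values are illustrative:

    from sqlmodel import Session

    from app.infrastructure.database import DatabaseManager, engine

    with Session(engine) as session:
        db = DatabaseManager(session)
        task = db.create_task({
            "pr_url": "https://github.com/example/repo/pull/1",  # illustrative URL
            "repo": "example/repo",
            "pr_number": 1,
            "status": "queued",
        })
        db.update_task(task.id, {"status": "running"})  # also bumps updated_at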
def get_agent_run(self, agent_run_id: UUID) -> AgentRunDB | None: """Get agent run by ID.""" return self.session.get(AgentRunDB, agent_run_id) - - def get_agent_runs_for_task(self, task_id: UUID) -> List[AgentRunDB]: + + def get_agent_runs_for_task(self, task_id: UUID) -> list[AgentRunDB]: """Get all agent runs for a task.""" from sqlmodel import select + statement = select(AgentRunDB).where(AgentRunDB.task_id == task_id) return list(self.session.exec(statement).all()) - - def update_agent_run(self, agent_run_id: UUID, updates: dict) -> Optional[AgentRunDB]: + + def update_agent_run(self, agent_run_id: UUID, updates: dict) -> AgentRunDB | None: """Update agent run.""" agent_run = self.session.get(AgentRunDB, agent_run_id) if not agent_run: return None - + for key, value in updates.items(): if hasattr(agent_run, key): setattr(agent_run, key, value) - + agent_run.updated_at = datetime.utcnow() self.session.commit() self.session.refresh(agent_run) return agent_run - + def create_score(self, score_data: dict) -> ScoreDB: """Create a new score.""" score = ScoreDB(**score_data) @@ -213,7 +240,7 @@ def create_score(self, score_data: dict) -> ScoreDB: self.session.commit() self.session.refresh(score) return score - + def create_artifact(self, artifact_data: dict) -> ArtifactDB: """Create a new artifact.""" artifact = ArtifactDB(**artifact_data) @@ -221,38 +248,41 @@ def create_artifact(self, artifact_data: dict) -> ArtifactDB: self.session.commit() self.session.refresh(artifact) return artifact - - def list_tasks(self, page: int = 1, page_size: int = 20, status: Optional[str] = None) -> tuple[List[TaskDB], int]: + + def list_tasks( + self, page: int = 1, page_size: int = 20, status: str | None = None + ) -> tuple[list[TaskDB], int]: """List tasks with pagination and optional status filtering.""" from sqlmodel import select - + # Build query statement = select(TaskDB) - + # Apply status filter if provided if status: statement = statement.where(TaskDB.status == status) - + # Apply ordering (newest first) statement = statement.order_by(TaskDB.created_at.desc()) - + # Get total count (before pagination) count_statement = select(TaskDB) if status: count_statement = count_statement.where(TaskDB.status == status) total = len(list(self.session.exec(count_statement).all())) - + # Apply pagination offset = (page - 1) * page_size statement = statement.offset(offset).limit(page_size) - + # Execute query tasks = list(self.session.exec(statement).all()) - + return tasks, total - - def get_scores_for_agent_run(self, agent_run_id: UUID) -> Optional[ScoreDB]: + + def get_scores_for_agent_run(self, agent_run_id: UUID) -> ScoreDB | None: """Get scores for a specific agent run.""" from sqlmodel import select + statement = select(ScoreDB).where(ScoreDB.agent_run_id == agent_run_id) - return self.session.exec(statement).first() \ No newline at end of file + return self.session.exec(statement).first() diff --git a/app/infrastructure/queue.py b/app/infrastructure/queue.py index b18bd06..84e06c5 100644 --- a/app/infrastructure/queue.py +++ b/app/infrastructure/queue.py @@ -1,13 +1,11 @@ """Redis and RQ task queue configuration.""" import logging -from typing import Any, Dict, Optional -from uuid import UUID +from typing import Any import redis -from rq import Queue, Worker +from rq import Queue, Retry, Worker from rq.job import Job -from rq import Retry from app.config import settings @@ -18,8 +16,8 @@ settings.redis_url_str, decode_responses=False, # Handle encoding manually to prevent UTF-8 errors 
health_check_interval=30, - encoding='utf-8', - encoding_errors='strict', + encoding="utf-8", + encoding_errors="strict", ) # Task queues with different priorities @@ -37,24 +35,24 @@ class QueueManager: """Manages task queue operations.""" - + def __init__(self): self.redis = redis_client self.queues = QUEUE_MAPPING - + def enqueue_task( self, func: str, args: tuple = (), - kwargs: Optional[Dict[str, Any]] = None, + kwargs: dict[str, Any] | None = None, queue_name: str = "default", job_timeout: int = 3600, - job_id: Optional[str] = None, + job_id: str | None = None, ) -> Job: """Enqueue a task for background processing.""" queue = self.queues.get(queue_name, default_queue) kwargs = kwargs or {} - + try: job = queue.enqueue( func, @@ -69,20 +67,20 @@ def enqueue_task( except Exception as e: logger.error(f"Failed to enqueue job: {e}") raise - - def get_job(self, job_id: str) -> Optional[Job]: + + def get_job(self, job_id: str) -> Job | None: """Get job by ID.""" try: return Job.fetch(job_id, connection=self.redis) except Exception as e: logger.error(f"Failed to fetch job {job_id}: {e}") return None - - def get_job_status(self, job_id: str) -> Optional[str]: + + def get_job_status(self, job_id: str) -> str | None: """Get job status.""" job = self.get_job(job_id) return job.get_status() if job else None - + def cancel_job(self, job_id: str) -> bool: """Cancel a job.""" try: @@ -95,24 +93,24 @@ def cancel_job(self, job_id: str) -> bool: except Exception as e: logger.error(f"Failed to cancel job {job_id}: {e}") return False - + def get_queue_length(self, queue_name: str = "default") -> int: """Get queue length.""" queue = self.queues.get(queue_name, default_queue) return len(queue) - + def clear_queue(self, queue_name: str = "default") -> int: """Clear all jobs from queue.""" queue = self.queues.get(queue_name, default_queue) cleared_count = queue.empty() logger.info(f"Cleared {cleared_count} jobs from queue {queue_name}") return cleared_count - + def get_failed_jobs(self, queue_name: str = "default") -> list: """Get failed jobs from queue.""" queue = self.queues.get(queue_name, default_queue) return queue.failed_job_registry.get_job_ids() - + def requeue_failed_job(self, job_id: str) -> bool: """Requeue a failed job.""" try: @@ -129,27 +127,27 @@ def requeue_failed_job(self, job_id: str) -> bool: class WorkerManager: """Manages RQ workers.""" - + def __init__(self, queue_names: list = None): self.queue_names = queue_names or ["high", "default", "low"] self.queues = [QUEUE_MAPPING[name] for name in self.queue_names] - - def start_worker(self, worker_name: Optional[str] = None) -> Worker: + + def start_worker(self, worker_name: str | None = None) -> Worker: """Start a worker process.""" worker = Worker( self.queues, connection=redis_client, name=worker_name, ) - + logger.info(f"Starting worker {worker.name} for queues {self.queue_names}") return worker - + def get_active_workers(self) -> list: """Get list of active workers.""" return Worker.all(connection=redis_client) - - def get_worker_stats(self) -> Dict[str, Any]: + + def get_worker_stats(self) -> dict[str, Any]: """Get worker statistics.""" workers = self.get_active_workers() return { @@ -188,14 +186,14 @@ def check_redis_connection() -> bool: return False -def check_queue_health() -> Dict[str, Any]: +def check_queue_health() -> dict[str, Any]: """Check overall queue system health.""" health_data = { "redis_connected": check_redis_connection(), "queue_stats": {}, "worker_stats": worker_manager.get_worker_stats(), } - + # Get queue 
statistics for queue_name, queue in QUEUE_MAPPING.items(): try: @@ -208,5 +206,5 @@ def check_queue_health() -> Dict[str, Any]: except Exception as e: logger.error(f"Failed to get stats for queue {queue_name}: {e}") health_data["queue_stats"][queue_name] = {"error": str(e)} - + return health_data diff --git a/app/main.py b/app/main.py index 9100bf5..a1ba675 100644 --- a/app/main.py +++ b/app/main.py @@ -1,23 +1,22 @@ """FastAPI application factory and main entry point.""" import logging -import os from contextlib import asynccontextmanager -from typing import Dict, Any +from typing import Any -from fastapi import FastAPI, Request, HTTPException +import uvicorn +from fastapi import FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.trustedhost import TrustedHostMiddleware from fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles -import uvicorn +from app.agents.registry import initialize_agent_registry from app.config import settings from app.infrastructure.database import create_tables, engine from app.infrastructure.queue import check_queue_health -from app.agents.registry import initialize_agent_registry -from app.presentation.routers import tasks, artifacts, health, logs from app.presentation.middleware import LoggingMiddleware, SecurityMiddleware +from app.presentation.routers import artifacts, health, logs, tasks # Configure logging logging.basicConfig( @@ -38,15 +37,15 @@ async def lifespan(app: FastAPI): logger.info("Starting Memory-Break Orchestrator...") logger.info(f"Redis URL: {settings.redis_url}") logger.info(f"Database URL: {settings.database_url}") - + # Initialize database logger.info("Creating database tables...") create_tables() - + # Initialize agent registry logger.info("Initializing agent registry...") initialize_agent_registry() - + # Health checks logger.info("Performing startup health checks...") queue_health = check_queue_health() @@ -55,29 +54,29 @@ async def lifespan(app: FastAPI): logger.error(f"Check Redis server at: {settings.redis_url}") else: logger.info("Redis connection successful") - + # Note: Container management is now handled by workers logger.info("Using worker-managed container architecture for agent isolation") - + logger.info("Application startup complete!") - + yield - + # Shutdown logger.info("Shutting down Memory-Break Orchestrator...") - + # Container management is handled by workers - no cleanup needed at API level - + # Close database connections logger.info("Closing database connections...") engine.dispose() - + logger.info("Application shutdown complete!") def create_app() -> FastAPI: """Create and configure FastAPI application.""" - + app = FastAPI( title=settings.app_name, description="Memory-Break Orchestrator for AI agent evaluation following VIBE architecture", @@ -87,22 +86,22 @@ def create_app() -> FastAPI: docs_url="/docs" if settings.debug else None, redoc_url="/redoc" if settings.debug else None, ) - + # Add middleware configure_middleware(app) - + # Add routers configure_routes(app) - + # Add exception handlers configure_exception_handlers(app) - + return app def configure_middleware(app: FastAPI) -> None: """Configure application middleware.""" - + # CORS middleware app.add_middleware( CORSMiddleware, @@ -111,14 +110,14 @@ def configure_middleware(app: FastAPI) -> None: allow_methods=["GET", "POST", "PUT", "DELETE"], allow_headers=["*"], ) - + # Trusted hosts (in production) if not settings.debug: app.add_middleware( TrustedHostMiddleware, 
allowed_hosts=["localhost", "127.0.0.1", settings.host], ) - + # Custom middleware app.add_middleware(LoggingMiddleware) app.add_middleware(SecurityMiddleware) @@ -126,32 +125,32 @@ def configure_middleware(app: FastAPI) -> None: def configure_routes(app: FastAPI) -> None: """Configure application routes.""" - + # API routes app.include_router( health.router, prefix="/health", tags=["health"], ) - + app.include_router( tasks.router, prefix="/api/v1/tasks", tags=["tasks"], ) - + app.include_router( artifacts.router, prefix="/api/v1/artifacts", tags=["artifacts"], ) - + app.include_router( logs.router, prefix="/api/v1/logs", tags=["logs"], ) - + # Static files (for web dashboard) try: app.mount("/static", StaticFiles(directory="static"), name="static") @@ -162,12 +161,14 @@ def configure_routes(app: FastAPI) -> None: def configure_exception_handlers(app: FastAPI) -> None: """Configure global exception handlers.""" - + @app.exception_handler(HTTPException) - async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse: + async def http_exception_handler( + request: Request, exc: HTTPException + ) -> JSONResponse: """Handle HTTP exceptions.""" logger.warning(f"HTTP {exc.status_code}: {exc.detail} - {request.url}") - + return JSONResponse( status_code=exc.status_code, content={ @@ -176,12 +177,14 @@ async def http_exception_handler(request: Request, exc: HTTPException) -> JSONRe "path": str(request.url.path), }, ) - + @app.exception_handler(Exception) - async def general_exception_handler(request: Request, exc: Exception) -> JSONResponse: + async def general_exception_handler( + request: Request, exc: Exception + ) -> JSONResponse: """Handle general exceptions.""" logger.error(f"Unhandled exception: {exc}", exc_info=True) - + return JSONResponse( status_code=500, content={ @@ -198,7 +201,7 @@ async def general_exception_handler(request: Request, exc: Exception) -> JSONRes # Root endpoint @app.get("/") -async def root() -> Dict[str, Any]: +async def root() -> dict[str, Any]: """Root endpoint with system information.""" return { "name": settings.app_name, @@ -210,7 +213,7 @@ async def root() -> Dict[str, Any]: # API info endpoint @app.get("/api/v1/info") -async def api_info() -> Dict[str, Any]: +async def api_info() -> dict[str, Any]: """API information endpoint.""" return { "name": settings.app_name, diff --git a/app/presentation/middleware.py b/app/presentation/middleware.py index 564bfd0..89f9862 100644 --- a/app/presentation/middleware.py +++ b/app/presentation/middleware.py @@ -1,9 +1,10 @@ """Custom middleware for security, logging, and request processing.""" -import time import logging +import time import uuid -from typing import Callable, Dict, Any +from collections.abc import Callable +from typing import Any from fastapi import Request, Response from fastapi.responses import JSONResponse @@ -17,17 +18,17 @@ class LoggingMiddleware(BaseHTTPMiddleware): """Middleware for request/response logging.""" - + def __init__(self, app: ASGIApp, logger_name: str = "api.requests"): super().__init__(app) self.logger = logging.getLogger(logger_name) - + async def dispatch(self, request: Request, call_next: Callable) -> Response: """Process request and log details.""" # Generate correlation ID for request tracing correlation_id = str(uuid.uuid4()) request.state.correlation_id = correlation_id - + # Log request start start_time = time.time() self.logger.info( @@ -39,12 +40,12 @@ async def dispatch(self, request: Request, call_next: Callable) -> Response: "query_params": 
dict(request.query_params), "client_ip": request.client.host if request.client else "unknown", "user_agent": request.headers.get("user-agent", "unknown"), - } + }, ) - + try: response = await call_next(request) - + # Log response process_time = time.time() - start_time self.logger.info( @@ -56,20 +57,20 @@ async def dispatch(self, request: Request, call_next: Callable) -> Response: "path": request.url.path, "status_code": response.status_code, "process_time": process_time, - } + }, ) - + # Add correlation ID to response headers response.headers["X-Correlation-ID"] = correlation_id - + return response - + except Exception as e: # Log error process_time = time.time() - start_time self.logger.error( f"[{correlation_id}] {request.method} {request.url.path} - " - f"ERROR - {process_time:.3f}s - {str(e)}", + f"ERROR - {process_time:.3f}s - {e!s}", extra={ "correlation_id": correlation_id, "method": request.method, @@ -77,9 +78,9 @@ async def dispatch(self, request: Request, call_next: Callable) -> Response: "error": str(e), "process_time": process_time, }, - exc_info=True + exc_info=True, ) - + # Return error response return JSONResponse( status_code=500, @@ -87,13 +88,13 @@ async def dispatch(self, request: Request, call_next: Callable) -> Response: "error": "Internal server error", "correlation_id": correlation_id, }, - headers={"X-Correlation-ID": correlation_id} + headers={"X-Correlation-ID": correlation_id}, ) class SecurityMiddleware(BaseHTTPMiddleware): """Middleware for basic security headers and validation.""" - + def __init__(self, app: ASGIApp): super().__init__(app) self.blocked_paths = {"/admin", "/internal"} @@ -103,114 +104,118 @@ def __init__(self, app: ASGIApp): "/api/v1/tasks/{id}/agents": 50, # 50 agent requests per minute "/health": 200, # High limit for health checks } - self.request_counts: Dict[str, Dict[str, Any]] = {} - + self.request_counts: dict[str, dict[str, Any]] = {} + async def dispatch(self, request: Request, call_next: Callable) -> Response: """Process request with security checks.""" - + # Block access to internal paths if any(request.url.path.startswith(path) for path in self.blocked_paths): return JSONResponse( status_code=403, content={"error": "Forbidden"}, ) - + # Basic rate limiting (in-memory, simple implementation) client_ip = request.client.host if request.client else "unknown" if self._is_rate_limited(request.url.path, client_ip): return JSONResponse( status_code=429, content={"error": "Too many requests"}, - headers={"Retry-After": "60"} + headers={"Retry-After": "60"}, ) - + # Process request response = await call_next(request) - + # Add security headers response.headers["X-Content-Type-Options"] = "nosniff" response.headers["X-Frame-Options"] = "DENY" response.headers["X-XSS-Protection"] = "1; mode=block" - + if not settings.debug: - response.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains" - + response.headers["Strict-Transport-Security"] = ( + "max-age=31536000; includeSubDomains" + ) + return response - + def _is_rate_limited(self, path: str, client_ip: str) -> bool: """Simple in-memory rate limiting.""" current_time = time.time() - + # Find matching rate limit path rate_limit = None for limit_path, limit_count in self.rate_limit_paths.items(): if path.startswith(limit_path): rate_limit = limit_count break - + if not rate_limit: return False - + # Initialize tracking for this IP if client_ip not in self.request_counts: self.request_counts[client_ip] = {} - + # Clean old entries (older than 1 minute) minute_ago = 
current_time - 60 self.request_counts[client_ip] = { - timestamp: count for timestamp, count in self.request_counts[client_ip].items() + timestamp: count + for timestamp, count in self.request_counts[client_ip].items() if float(timestamp) > minute_ago } - + # Count requests in the last minute total_requests = sum(self.request_counts[client_ip].values()) - + if total_requests >= rate_limit: return True - + # Record this request timestamp_key = str(int(current_time)) - self.request_counts[client_ip][timestamp_key] = \ + self.request_counts[client_ip][timestamp_key] = ( self.request_counts[client_ip].get(timestamp_key, 0) + 1 - + ) + return False class CompressionMiddleware(BaseHTTPMiddleware): """Middleware for response compression.""" - + def __init__(self, app: ASGIApp, minimum_size: int = 500): super().__init__(app) self.minimum_size = minimum_size - + async def dispatch(self, request: Request, call_next: Callable) -> Response: """Process request with compression support.""" response = await call_next(request) - + # Add compression headers if response is large enough - if hasattr(response, 'body') and len(response.body) > self.minimum_size: - accept_encoding = request.headers.get('accept-encoding', '') - if 'gzip' in accept_encoding.lower(): - response.headers['Content-Encoding'] = 'gzip' - + if hasattr(response, "body") and len(response.body) > self.minimum_size: + accept_encoding = request.headers.get("accept-encoding", "") + if "gzip" in accept_encoding.lower(): + response.headers["Content-Encoding"] = "gzip" + return response class APIVersionMiddleware(BaseHTTPMiddleware): """Middleware for API versioning.""" - + def __init__(self, app: ASGIApp, current_version: str = "v1"): super().__init__(app) self.current_version = current_version - + async def dispatch(self, request: Request, call_next: Callable) -> Response: """Process request with API version handling.""" # Add API version to request state request.state.api_version = self.current_version - + response = await call_next(request) - + # Add API version to response headers response.headers["X-API-Version"] = self.current_version - + return response diff --git a/app/presentation/routers/artifacts.py b/app/presentation/routers/artifacts.py index 7ed766f..dccbce7 100644 --- a/app/presentation/routers/artifacts.py +++ b/app/presentation/routers/artifacts.py @@ -3,15 +3,15 @@ import logging import os from pathlib import Path -from typing import Dict, Any +from typing import Any +from uuid import UUID -from fastapi import APIRouter, Depends, HTTPException, Response +from fastapi import APIRouter, Depends, HTTPException from fastapi.responses import FileResponse from sqlmodel import Session -from uuid import UUID -from app.infrastructure.database import get_session, DatabaseManager from app.config import settings +from app.infrastructure.database import DatabaseManager, get_session router = APIRouter() logger = logging.getLogger(__name__) @@ -30,74 +30,77 @@ async def download_artifact( db: DatabaseManager = Depends(get_db_manager), ) -> FileResponse: """Download an artifact file for a specific task and agent.""" - + logger.info(f"Downloading artifact: {task_id}/{agent}/{artifact_name}") - + # Verify task exists task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + # Get agent runs for this task agent_runs = db.get_agent_runs_for_task(task_id) - agent_run = next((run for run in agent_runs if (run.agent.value if hasattr(run.agent, 'value') else run.agent) == agent), None) - + 
agent_run = next( + ( + run + for run in agent_runs + if (run.agent.value if hasattr(run.agent, "value") else run.agent) == agent + ), + None, + ) + if not agent_run: - raise HTTPException( - status_code=404, - detail=f"Agent run for {agent} not found" - ) - + raise HTTPException(status_code=404, detail=f"Agent run for {agent} not found") + # Check if artifact exists in agent run if artifact_name not in agent_run.artifacts: raise HTTPException( status_code=404, - detail=f"Artifact {artifact_name} not found for agent {agent}" + detail=f"Artifact {artifact_name} not found for agent {agent}", ) - + # Get file path file_path = Path(agent_run.artifacts[artifact_name]) - + # Security check - ensure file is within expected directory expected_base = Path(settings.run_root).expanduser() / str(task_id) / agent try: file_path = file_path.resolve() expected_base = expected_base.resolve() - + # Check if file is within the expected directory if not str(file_path).startswith(str(expected_base)): logger.warning(f"Security violation: attempted access to {file_path}") raise HTTPException(status_code=403, detail="Access denied") - + except (OSError, ValueError) as e: logger.error(f"Path resolution error: {e}") raise HTTPException(status_code=400, detail="Invalid file path") - + # Check if file exists if not file_path.exists(): raise HTTPException( - status_code=404, - detail=f"Artifact file not found: {artifact_name}" + status_code=404, detail=f"Artifact file not found: {artifact_name}" ) - + # Check if file is readable if not os.access(file_path, os.R_OK): logger.error(f"File not readable: {file_path}") raise HTTPException(status_code=403, detail="File not accessible") - + # Determine media type based on file extension media_type = _get_media_type(file_path) - + # Create appropriate filename for download filename = f"{task_id}_{agent}_{artifact_name}{file_path.suffix}" - + logger.info(f"Serving file: {file_path} as {filename}") - + return FileResponse( path=str(file_path), media_type=media_type, filename=filename, - headers={"Cache-Control": "public, max-age=3600"} # Cache for 1 hour + headers={"Cache-Control": "public, max-age=3600"}, # Cache for 1 hour ) @@ -107,29 +110,29 @@ async def download_task_bundle( db: DatabaseManager = Depends(get_db_manager), ) -> FileResponse: """Download a zip bundle of all artifacts for a task.""" - + logger.info(f"Creating bundle for task: {task_id}") - + # Verify task exists task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + # Get all agent runs agent_runs = db.get_agent_runs_for_task(task_id) if not agent_runs: raise HTTPException(status_code=404, detail="No agent runs found for task") - + # Create bundle - import zipfile import tempfile - + import zipfile + try: # Create temporary zip file - with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as temp_zip: + with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as temp_zip: temp_zip_path = temp_zip.name - - with zipfile.ZipFile(temp_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + + with zipfile.ZipFile(temp_zip_path, "w", zipfile.ZIP_DEFLATED) as zipf: # Add task metadata task_info = { "task_id": str(task_id), @@ -139,14 +142,19 @@ async def download_task_bundle( "agents": task_db.agents, "created_at": task_db.created_at.isoformat(), } - + import json + zipf.writestr("task_info.json", json.dumps(task_info, indent=2)) - + # Add artifacts from each agent for agent_run in agent_runs: - agent_name = agent_run.agent.value if 
hasattr(agent_run.agent, 'value') else agent_run.agent - + agent_name = ( + agent_run.agent.value + if hasattr(agent_run.agent, "value") + else agent_run.agent + ) + # Add agent run metadata agent_info = { "agent": agent_name, @@ -155,9 +163,11 @@ async def download_task_bundle( "stats": agent_run.stats, "created_at": agent_run.created_at.isoformat(), } - - zipf.writestr(f"{agent_name}/agent_info.json", json.dumps(agent_info, indent=2)) - + + zipf.writestr( + f"{agent_name}/agent_info.json", json.dumps(agent_info, indent=2) + ) + # Add artifact files for artifact_name, artifact_path in agent_run.artifacts.items(): file_path = Path(artifact_path) @@ -171,22 +181,22 @@ async def download_task_bundle( logger.warning(f"Failed to add {file_path} to bundle: {e}") else: logger.warning(f"Artifact file not found: {artifact_path}") - + # Return the zip file bundle_filename = f"task_{task_id}_bundle.zip" - + return FileResponse( path=temp_zip_path, media_type="application/zip", filename=bundle_filename, headers={"Cache-Control": "no-cache"}, - background=_cleanup_temp_file(temp_zip_path) # Cleanup after sending + background=_cleanup_temp_file(temp_zip_path), # Cleanup after sending ) - + except Exception as e: logger.error(f"Failed to create bundle for task {task_id}: {e}") # Cleanup temp file on error - if 'temp_zip_path' in locals() and os.path.exists(temp_zip_path): + if "temp_zip_path" in locals() and os.path.exists(temp_zip_path): try: os.unlink(temp_zip_path) except: @@ -198,80 +208,87 @@ async def download_task_bundle( async def list_artifacts( task_id: UUID, db: DatabaseManager = Depends(get_db_manager), -) -> Dict[str, Any]: +) -> dict[str, Any]: """List all available artifacts for a task.""" - + # Verify task exists task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + # Get agent runs agent_runs = db.get_agent_runs_for_task(task_id) - + artifacts = {} total_size = 0 - + for agent_run in agent_runs: # Handle both string and enum types - agent_name = agent_run.agent.value if hasattr(agent_run.agent, 'value') else agent_run.agent + agent_name = ( + agent_run.agent.value + if hasattr(agent_run.agent, "value") + else agent_run.agent + ) agent_artifacts = {} - + for artifact_name, artifact_path in agent_run.artifacts.items(): file_path = Path(artifact_path) - + artifact_info = { "name": artifact_name, "path": str(file_path), "exists": file_path.exists(), - "download_url": f"/api/v1/artifacts/{task_id}/{agent_name}/{artifact_name}" + "download_url": f"/api/v1/artifacts/{task_id}/{agent_name}/{artifact_name}", } - + if file_path.exists(): try: stat = file_path.stat() - artifact_info.update({ - "size": stat.st_size, - "modified": stat.st_mtime, - "readable": os.access(file_path, os.R_OK) - }) + artifact_info.update( + { + "size": stat.st_size, + "modified": stat.st_mtime, + "readable": os.access(file_path, os.R_OK), + } + ) total_size += stat.st_size except Exception as e: logger.warning(f"Failed to get stats for {file_path}: {e}") artifact_info["error"] = str(e) - + agent_artifacts[artifact_name] = artifact_info - + artifacts[agent_name] = agent_artifacts - + return { "task_id": str(task_id), "artifacts": artifacts, "total_size": total_size, - "bundle_url": f"/api/v1/artifacts/{task_id}/bundle" + "bundle_url": f"/api/v1/artifacts/{task_id}/bundle", } def _get_media_type(file_path: Path) -> str: """Get media type based on file extension.""" suffix = file_path.suffix.lower() - + media_types = { - '.txt': 'text/plain', - '.log': 'text/plain', 
- '.json': 'application/json', - '.zip': 'application/zip', - '.csv': 'text/csv', - '.html': 'text/html', - '.xml': 'application/xml', - '.pdf': 'application/pdf', + ".txt": "text/plain", + ".log": "text/plain", + ".json": "application/json", + ".zip": "application/zip", + ".csv": "text/csv", + ".html": "text/html", + ".xml": "application/xml", + ".pdf": "application/pdf", } - - return media_types.get(suffix, 'application/octet-stream') + + return media_types.get(suffix, "application/octet-stream") def _cleanup_temp_file(file_path: str): """Background task to cleanup temporary files.""" + def cleanup(): try: if os.path.exists(file_path): @@ -279,5 +296,5 @@ def cleanup(): logger.debug(f"Cleaned up temporary file: {file_path}") except Exception as e: logger.warning(f"Failed to cleanup temp file {file_path}: {e}") - + return cleanup diff --git a/app/presentation/routers/health.py b/app/presentation/routers/health.py index f0d691b..7e3db90 100644 --- a/app/presentation/routers/health.py +++ b/app/presentation/routers/health.py @@ -1,137 +1,129 @@ """Health check endpoints.""" import logging -from typing import Dict, Any +from typing import Any from fastapi import APIRouter, Depends from sqlmodel import Session +from app.agents.registry import get_agent_registry from app.infrastructure.database import get_session from app.infrastructure.queue import check_queue_health -from app.agents.registry import get_agent_registry router = APIRouter() logger = logging.getLogger(__name__) @router.get("/") -async def health_check() -> Dict[str, Any]: +async def health_check() -> dict[str, Any]: """Basic health check endpoint.""" return { "status": "healthy", "service": "Memory-Break Orchestrator", - "timestamp": "2025-10-31T12:10:00Z" + "timestamp": "2025-10-31T12:10:00Z", } @router.get("/detailed") async def detailed_health_check( - session: Session = Depends(get_session) -) -> Dict[str, Any]: + session: Session = Depends(get_session), +) -> dict[str, Any]: """Detailed health check with all system components.""" - + health_data = { "status": "healthy", "service": "Memory-Break Orchestrator", - "components": {} + "components": {}, } - + # Database health try: # Simple query to test database connection from sqlmodel import text + session.exec(text("SELECT 1")) health_data["components"]["database"] = { "status": "healthy", - "details": "Connection successful" + "details": "Connection successful", } except Exception as e: logger.error(f"Database health check failed: {e}") - health_data["components"]["database"] = { - "status": "unhealthy", - "error": str(e) - } + health_data["components"]["database"] = {"status": "unhealthy", "error": str(e)} health_data["status"] = "degraded" - + # Queue system health try: queue_health = check_queue_health() if queue_health.get("redis_connected", False): health_data["components"]["queue"] = { "status": "healthy", - "details": queue_health + "details": queue_health, } else: health_data["components"]["queue"] = { "status": "unhealthy", - "details": queue_health + "details": queue_health, } health_data["status"] = "degraded" except Exception as e: logger.error(f"Queue health check failed: {e}") - health_data["components"]["queue"] = { - "status": "unhealthy", - "error": str(e) - } + health_data["components"]["queue"] = {"status": "unhealthy", "error": str(e)} health_data["status"] = "degraded" - + # Agent registry health try: registry = get_agent_registry() agent_health = registry.health_check() - + healthy_agents = sum( - 1 for agent_data in agent_health.values() + 1 + for 
agent_data in agent_health.values() if agent_data.get("available", False) ) total_agents = len(agent_health) - + if healthy_agents > 0: health_data["components"]["agents"] = { "status": "healthy" if healthy_agents == total_agents else "degraded", "healthy_count": healthy_agents, "total_count": total_agents, - "details": agent_health + "details": agent_health, } else: health_data["components"]["agents"] = { "status": "unhealthy", "healthy_count": 0, "total_count": total_agents, - "details": agent_health + "details": agent_health, } health_data["status"] = "degraded" - + except Exception as e: logger.error(f"Agent health check failed: {e}") - health_data["components"]["agents"] = { - "status": "unhealthy", - "error": str(e) - } + health_data["components"]["agents"] = {"status": "unhealthy", "error": str(e)} health_data["status"] = "degraded" - + return health_data @router.get("/readiness") -async def readiness_check( - session: Session = Depends(get_session) -) -> Dict[str, Any]: +async def readiness_check(session: Session = Depends(get_session)) -> dict[str, Any]: """Readiness probe for container orchestration.""" - + # Check critical components for readiness ready = True checks = {} - + # Database readiness try: from sqlmodel import text + session.exec(text("SELECT 1")) checks["database"] = True except Exception: checks["database"] = False ready = False - + # Queue system readiness try: queue_health = check_queue_health() @@ -141,7 +133,7 @@ async def readiness_check( except Exception: checks["queue"] = False ready = False - + # At least one agent available try: registry = get_agent_registry() @@ -152,51 +144,45 @@ async def readiness_check( except Exception: checks["agents"] = False ready = False - - return { - "ready": ready, - "checks": checks - } + + return {"ready": ready, "checks": checks} @router.get("/liveness") -async def liveness_check() -> Dict[str, Any]: +async def liveness_check() -> dict[str, Any]: """Liveness probe for container orchestration.""" # Simple liveness check - if we can respond, we're alive - return { - "alive": True, - "timestamp": "2025-10-31T12:10:00Z" - } + return {"alive": True, "timestamp": "2025-10-31T12:10:00Z"} @router.get("/metrics") -async def metrics() -> Dict[str, Any]: +async def metrics() -> dict[str, Any]: """Basic metrics endpoint.""" try: # Queue metrics queue_health = check_queue_health() - + # Agent metrics registry = get_agent_registry() agent_health = registry.health_check() - available_agents = len([ - agent for agent, data in agent_health.items() - if data.get("available", False) - ]) - + available_agents = len( + [ + agent + for agent, data in agent_health.items() + if data.get("available", False) + ] + ) + return { "queue_metrics": queue_health.get("queue_stats", {}), "worker_metrics": queue_health.get("worker_stats", {}), "agent_metrics": { "total_agents": len(agent_health), "available_agents": available_agents, - "agents": agent_health - } + "agents": agent_health, + }, } - + except Exception as e: logger.error(f"Metrics collection failed: {e}") - return { - "error": "Metrics collection failed", - "details": str(e) - } + return {"error": "Metrics collection failed", "details": str(e)} diff --git a/app/presentation/routers/logs.py b/app/presentation/routers/logs.py index 5b78bb8..61787fd 100644 --- a/app/presentation/routers/logs.py +++ b/app/presentation/routers/logs.py @@ -2,17 +2,16 @@ import json import logging +from collections.abc import AsyncGenerator from pathlib import Path -from typing import AsyncGenerator, Optional from uuid 
import UUID from fastapi import APIRouter, Depends, HTTPException from fastapi.responses import StreamingResponse from sqlmodel import Session -from app.infrastructure.database import get_session, DatabaseManager -from app.infrastructure.queue import get_queue_manager, QueueManager from app.config import settings +from app.infrastructure.database import DatabaseManager, get_session router = APIRouter() logger = logging.getLogger(__name__) @@ -30,105 +29,112 @@ async def stream_task_logs( db: DatabaseManager = Depends(get_db_manager), ) -> StreamingResponse: """Stream live logs for a task using Server-Sent Events.""" - + # Verify task exists task = db.get_task(task_id) if not task: raise HTTPException(status_code=404, detail="Task not found") - + async def log_generator() -> AsyncGenerator[str, None]: """Generate Server-Sent Events for live logs.""" - import time import asyncio - + import time + try: - # Send initial connection event + # Send initial connection event yield f"data: {json.dumps({'type': 'connected', 'level': 'INFO', 'task_id': str(task_id), 'message': 'Connected to live log stream', 'timestamp': task.created_at.isoformat()})}\n\n" - + # Get log file path - log_file_path = Path(settings.run_root).expanduser() / str(task_id) / "task.log" - + log_file_path = ( + Path(settings.run_root).expanduser() / str(task_id) / "task.log" + ) + # Debug info yield f"data: {json.dumps({'type': 'info', 'level': 'INFO', 'message': f'Monitoring log file: {log_file_path}', 'timestamp': task.created_at.isoformat()})}\n\n" - + # Check if log file exists, if not wait and check periodically max_wait_for_file = 60 # Wait up to 1 minute for log file to appear file_wait_start = time.time() - - while not log_file_path.exists() and (time.time() - file_wait_start) < max_wait_for_file: + + while ( + not log_file_path.exists() + and (time.time() - file_wait_start) < max_wait_for_file + ): yield f"data: {json.dumps({'type': 'info', 'level': 'INFO', 'message': 'Waiting for task to start...', 'timestamp': task.created_at.isoformat()})}\n\n" await asyncio.sleep(2) - + # Check if task status changed updated_task = db.get_task(task_id) - if updated_task and updated_task.status in ['done', 'error']: + if updated_task and updated_task.status in ["done", "error"]: yield f"data: {json.dumps({'type': 'info', 'level': 'INFO', 'message': f'Task completed with status: {updated_task.status}', 'timestamp': updated_task.updated_at.isoformat()})}\n\n" return - + if not log_file_path.exists(): - yield f"data: {json.dumps({'type': 'error', 'level': 'ERROR', 'message': f'Log file not found after waiting. Task may have failed to start.', 'timestamp': task.created_at.isoformat()})}\n\n" + yield f"data: {json.dumps({'type': 'error', 'level': 'ERROR', 'message': 'Log file not found after waiting. 
Task may have failed to start.', 'timestamp': task.created_at.isoformat()})}\n\n" return - + # Stream log file content last_position = 0 max_stream_time = 600 # 10 minutes max streaming stream_start = time.time() - - with open(log_file_path, 'r', encoding='utf-8') as f: + + with open(log_file_path, encoding="utf-8") as f: # Send any existing content first existing_content = f.read() if existing_content.strip(): - for line in existing_content.split('\n'): + for line in existing_content.split("\n"): if line.strip(): try: log_data = json.loads(line) # Ensure required fields - if 'timestamp' not in log_data: - log_data['timestamp'] = task.created_at.isoformat() - if 'level' not in log_data: - log_data['level'] = 'INFO' + if "timestamp" not in log_data: + log_data["timestamp"] = task.created_at.isoformat() + if "level" not in log_data: + log_data["level"] = "INFO" yield f"data: {json.dumps(log_data)}\n\n" except json.JSONDecodeError: # Handle plain text logs yield f"data: {json.dumps({'type': 'log', 'level': 'INFO', 'message': line, 'timestamp': task.created_at.isoformat()})}\n\n" - + last_position = f.tell() - + # Follow file for new content while (time.time() - stream_start) < max_stream_time: # Check if task completed updated_task = db.get_task(task_id) - if updated_task and updated_task.status in ['done', 'error']: + if updated_task and updated_task.status in ["done", "error"]: yield f"data: {json.dumps({'type': 'completed', 'level': 'INFO', 'status': updated_task.status, 'message': f'Task completed: {updated_task.status}', 'timestamp': updated_task.updated_at.isoformat()})}\n\n" break - + f.seek(last_position) new_content = f.read() if new_content: - for line in new_content.split('\n'): + for line in new_content.split("\n"): if line.strip(): try: log_data = json.loads(line) # Ensure required fields - if 'timestamp' not in log_data: - log_data['timestamp'] = task.created_at.isoformat() - if 'level' not in log_data: - log_data['level'] = 'INFO' + if "timestamp" not in log_data: + log_data["timestamp"] = ( + task.created_at.isoformat() + ) + if "level" not in log_data: + log_data["level"] = "INFO" yield f"data: {json.dumps(log_data)}\n\n" except json.JSONDecodeError: yield f"data: {json.dumps({'type': 'log', 'level': 'INFO', 'message': line, 'timestamp': task.created_at.isoformat()})}\n\n" last_position = f.tell() - + # Send heartbeat to keep connection alive if int(time.time()) % 30 == 0: # Every 30 seconds yield f"data: {json.dumps({'type': 'heartbeat', 'level': 'INFO', 'message': 'Connection alive', 'timestamp': task.created_at.isoformat()})}\n\n" - + await asyncio.sleep(1) # Non-blocking sleep - + except Exception as e: logger.error(f"Error streaming logs for task {task_id}: {e}") - yield f"data: {json.dumps({'type': 'error', 'level': 'ERROR', 'message': f'Stream error: {str(e)}', 'timestamp': task.created_at.isoformat()})}\n\n" - + yield f"data: {json.dumps({'type': 'error', 'level': 'ERROR', 'message': f'Stream error: {e!s}', 'timestamp': task.created_at.isoformat()})}\n\n" + return StreamingResponse( log_generator(), media_type="text/event-stream", # Proper SSE content type @@ -137,7 +143,7 @@ async def log_generator() -> AsyncGenerator[str, None]: "Connection": "keep-alive", "Access-Control-Allow-Origin": "*", # Fix CORS issues "Access-Control-Allow-Headers": "Cache-Control", - } + }, ) @@ -148,40 +154,47 @@ async def stream_agent_logs( db: DatabaseManager = Depends(get_db_manager), ) -> StreamingResponse: """Stream live logs for a specific agent.""" - + # Verify task exists task = 
db.get_task(task_id) if not task: raise HTTPException(status_code=404, detail="Task not found") - + # Verify agent exists in task if agent_name not in task.agents: raise HTTPException(status_code=404, detail="Agent not found in task") - + async def agent_log_generator() -> AsyncGenerator[str, None]: """Generate Server-Sent Events for agent-specific logs.""" - + yield f"data: {json.dumps({'type': 'connected', 'agent': agent_name, 'task_id': str(task_id)})}\n\n" - + # Get agent log file path - agent_log_path = Path(settings.run_root).expanduser() / str(task_id) / "agents" / agent_name / "session.log" - + agent_log_path = ( + Path(settings.run_root).expanduser() + / str(task_id) + / "agents" + / agent_name + / "session.log" + ) + if not agent_log_path.exists(): yield f"data: {json.dumps({'type': 'info', 'message': f'Agent {agent_name} log not found. Agent may not have started yet.'})}\n\n" return - + try: - with open(agent_log_path, 'r', encoding='utf-8') as f: + with open(agent_log_path, encoding="utf-8") as f: import time + last_position = 0 max_wait_time = 300 start_time = time.time() - + while time.time() - start_time < max_wait_time: f.seek(last_position) new_content = f.read() if new_content: - for line in new_content.split('\n'): + for line in new_content.split("\n"): if line.strip(): try: log_data = json.loads(line) @@ -189,27 +202,29 @@ async def agent_log_generator() -> AsyncGenerator[str, None]: except json.JSONDecodeError: yield f"data: {json.dumps({'type': 'agent_log', 'agent': agent_name, 'message': line})}\n\n" last_position = f.tell() - + # Check agent status agent_runs = db.get_agent_runs_for_task(task_id) - agent_run = next((run for run in agent_runs if run.agent == agent_name), None) - if agent_run and agent_run.status in ['done', 'error']: + agent_run = next( + (run for run in agent_runs if run.agent == agent_name), None + ) + if agent_run and agent_run.status in ["done", "error"]: yield f"data: {json.dumps({'type': 'agent_completed', 'agent': agent_name, 'status': agent_run.status})}\n\n" break - + time.sleep(1) - + except Exception as e: logger.error(f"Error streaming agent logs for {agent_name}: {e}") - yield f"data: {json.dumps({'type': 'error', 'message': f'Error reading agent logs: {str(e)}'})}\n\n" - + yield f"data: {json.dumps({'type': 'error', 'message': f'Error reading agent logs: {e!s}'})}\n\n" + return StreamingResponse( agent_log_generator(), media_type="text/plain", headers={ "Cache-Control": "no-cache", "Connection": "keep-alive", - } + }, ) @@ -219,26 +234,28 @@ async def get_task_log_files( db: DatabaseManager = Depends(get_db_manager), ): """Get available log files for a task.""" - + # Verify task exists task = db.get_task(task_id) if not task: raise HTTPException(status_code=404, detail="Task not found") - + task_dir = Path(settings.run_root).expanduser() / str(task_id) log_files = [] - + if task_dir.exists(): # Main task log task_log = task_dir / "task.log" if task_log.exists(): - log_files.append({ - "name": "task.log", - "path": "task.log", - "size": task_log.stat().st_size, - "modified": task_log.stat().st_mtime - }) - + log_files.append( + { + "name": "task.log", + "path": "task.log", + "size": task_log.stat().st_size, + "modified": task_log.stat().st_mtime, + } + ) + # Agent logs agents_dir = task_dir / "agents" if agents_dir.exists(): @@ -247,41 +264,49 @@ async def get_task_log_files( # Container stdout container_stdout = agent_dir / "container_stdout.log" if container_stdout.exists(): - log_files.append({ - "name": 
f"{agent_dir.name}_container_stdout.log", - "path": f"agents/{agent_dir.name}/container_stdout.log", - "size": container_stdout.stat().st_size, - "modified": container_stdout.stat().st_mtime - }) - + log_files.append( + { + "name": f"{agent_dir.name}_container_stdout.log", + "path": f"agents/{agent_dir.name}/container_stdout.log", + "size": container_stdout.stat().st_size, + "modified": container_stdout.stat().st_mtime, + } + ) + # Container stderr container_stderr = agent_dir / "container_stderr.log" if container_stderr.exists(): - log_files.append({ - "name": f"{agent_dir.name}_container_stderr.log", - "path": f"agents/{agent_dir.name}/container_stderr.log", - "size": container_stderr.stat().st_size, - "modified": container_stderr.stat().st_mtime - }) - + log_files.append( + { + "name": f"{agent_dir.name}_container_stderr.log", + "path": f"agents/{agent_dir.name}/container_stderr.log", + "size": container_stderr.stat().st_size, + "modified": container_stderr.stat().st_mtime, + } + ) + session_log = agent_dir / "session.log" if session_log.exists(): - log_files.append({ - "name": f"{agent_dir.name}_session.log", - "path": f"agents/{agent_dir.name}/session.log", - "size": session_log.stat().st_size, - "modified": session_log.stat().st_mtime - }) - + log_files.append( + { + "name": f"{agent_dir.name}_session.log", + "path": f"agents/{agent_dir.name}/session.log", + "size": session_log.stat().st_size, + "modified": session_log.stat().st_mtime, + } + ) + transcript = agent_dir / "transcript.txt" if transcript.exists(): - log_files.append({ - "name": f"{agent_dir.name}_transcript.txt", - "path": f"agents/{agent_dir.name}/transcript.txt", - "size": transcript.stat().st_size, - "modified": transcript.stat().st_mtime - }) - + log_files.append( + { + "name": f"{agent_dir.name}_transcript.txt", + "path": f"agents/{agent_dir.name}/transcript.txt", + "size": transcript.stat().st_size, + "modified": transcript.stat().st_mtime, + } + ) + return {"task_id": str(task_id), "log_files": log_files} @@ -293,77 +318,86 @@ async def stream_container_logs( db: DatabaseManager = Depends(get_db_manager), ) -> StreamingResponse: """Stream live container logs (stdout/stderr) for a specific agent.""" - + # Verify task exists task = db.get_task(task_id) if not task: raise HTTPException(status_code=404, detail="Task not found") - + async def container_log_generator() -> AsyncGenerator[str, None]: """Generate Server-Sent Events for container logs.""" - import time import asyncio - + import time + try: yield f"data: {json.dumps({'type': 'connected', 'agent': agent_name, 'log_type': log_type, 'task_id': str(task_id)})}\n\n" - + # Get container log file path log_filename = f"container_{log_type}.log" - container_log_path = Path(settings.run_root).expanduser() / str(task_id) / "agents" / agent_name / log_filename - + container_log_path = ( + Path(settings.run_root).expanduser() + / str(task_id) + / "agents" + / agent_name + / log_filename + ) + # Wait for log file to appear max_wait_time = 60 wait_start = time.time() - - while not container_log_path.exists() and (time.time() - wait_start) < max_wait_time: + + while ( + not container_log_path.exists() + and (time.time() - wait_start) < max_wait_time + ): yield f"data: {json.dumps({'type': 'info', 'message': f'Waiting for container {log_type} log...'})}\n\n" await asyncio.sleep(2) - + if not container_log_path.exists(): yield f"data: {json.dumps({'type': 'error', 'message': f'Container {log_type} log not found. 
Agent may not have started.'})}\n\n" return - + # Stream log file content last_position = 0 max_stream_time = 7200 # 2 hours stream_start = time.time() - - with open(container_log_path, 'r', encoding='utf-8', errors='replace') as f: + + with open(container_log_path, encoding="utf-8", errors="replace") as f: # Send existing content existing_content = f.read() if existing_content.strip(): - for line in existing_content.split('\n'): + for line in existing_content.split("\n"): if line.strip(): yield f"data: {json.dumps({'type': 'container_log', 'agent': agent_name, 'log_type': log_type, 'message': line})}\n\n" - + last_position = f.tell() - + # Follow file for new content while (time.time() - stream_start) < max_stream_time: # Check if task completed updated_task = db.get_task(task_id) - if updated_task and updated_task.status in ['done', 'error']: + if updated_task and updated_task.status in ["done", "error"]: yield f"data: {json.dumps({'type': 'completed', 'status': updated_task.status, 'message': f'Task completed: {updated_task.status}'})}\n\n" break - + f.seek(last_position) new_content = f.read() if new_content: - for line in new_content.split('\n'): + for line in new_content.split("\n"): if line.strip(): yield f"data: {json.dumps({'type': 'container_log', 'agent': agent_name, 'log_type': log_type, 'message': line})}\n\n" last_position = f.tell() - + # Heartbeat if int(time.time()) % 30 == 0: yield f"data: {json.dumps({'type': 'heartbeat', 'message': 'Connection alive'})}\n\n" - + await asyncio.sleep(1) - + except Exception as e: logger.error(f"Error streaming container logs for {agent_name}: {e}") - yield f"data: {json.dumps({'type': 'error', 'message': f'Stream error: {str(e)}'})}\n\n" - + yield f"data: {json.dumps({'type': 'error', 'message': f'Stream error: {e!s}'})}\n\n" + return StreamingResponse( container_log_generator(), media_type="text/event-stream", @@ -372,5 +406,5 @@ async def container_log_generator() -> AsyncGenerator[str, None]: "Connection": "keep-alive", "Access-Control-Allow-Origin": "*", "Access-Control-Allow-Headers": "Cache-Control", - } + }, ) diff --git a/app/presentation/routers/tasks.py b/app/presentation/routers/tasks.py index dbf54fe..5a54c7c 100644 --- a/app/presentation/routers/tasks.py +++ b/app/presentation/routers/tasks.py @@ -1,20 +1,22 @@ """Task management endpoints.""" import logging -from typing import List, Dict, Any, Optional +from typing import Any from uuid import UUID -from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks +from fastapi import APIRouter, Depends, HTTPException +from pydantic import BaseModel, Field, HttpUrl from sqlmodel import Session -from pydantic import BaseModel, HttpUrl, Field +from app.agents.registry import validate_agent_list from app.domain.entities import ( - Task, AgentRun, TaskStatus, AgentRunStatus, AgentName, RubricDimension + AgentName, + AgentRunStatus, + RubricDimension, + TaskStatus, ) -from app.infrastructure.database import get_session, DatabaseManager +from app.infrastructure.database import DatabaseManager, get_session from workers.simple_worker import process_task_simple -from app.agents.registry import get_agent_registry, validate_agent_list -from app.config import settings router = APIRouter() logger = logging.getLogger(__name__) @@ -23,62 +25,69 @@ # Request/Response Models class CreateTaskRequest(BaseModel): """Request model for creating a new task.""" + pr_url: HttpUrl - agents: List[AgentName] = Field(default=[AgentName.IFLOW, AgentName.CLAUDE, AgentName.GEMINI]) - rubric: 
List[RubricDimension] = Field(default_factory=lambda: list(RubricDimension)) + agents: list[AgentName] = Field( + default=[AgentName.IFLOW, AgentName.CLAUDE, AgentName.GEMINI] + ) + rubric: list[RubricDimension] = Field(default_factory=lambda: list(RubricDimension)) max_files: int = Field(default=50, ge=1, le=1000) class TaskResponse(BaseModel): """Response model for task information.""" + id: str pr_url: str repo: str pr_number: int - agents: List[str] - rubric: List[str] + agents: list[str] + rubric: list[str] status: str max_files: int created_at: str updated_at: str - started_at: Optional[str] = None - completed_at: Optional[str] = None - changed_files: List[str] = [] - error_message: Optional[str] = None + started_at: str | None = None + completed_at: str | None = None + changed_files: list[str] = [] + error_message: str | None = None class AgentRunResponse(BaseModel): """Response model for agent run information.""" + id: str task_id: str agent: str status: str - milestones: Dict[str, Any] - artifacts: Dict[str, Any] - stats: Dict[str, Any] + milestones: dict[str, Any] + artifacts: dict[str, Any] + stats: dict[str, Any] created_at: str updated_at: str - started_at: Optional[str] = None - completed_at: Optional[str] = None - error_message: Optional[str] = None + started_at: str | None = None + completed_at: str | None = None + error_message: str | None = None retry_count: int = 0 class StatusUpdateRequest(BaseModel): """Request model for status updates from agent containers.""" + task_id: str agent_type: str status: str - message: Optional[str] = None - progress: Optional[float] = None - timestamp: Optional[str] = None - memory_usage: Optional[float] = None - cpu_usage: Optional[float] = None + message: str | None = None + progress: float | None = None + timestamp: str | None = None + memory_usage: float | None = None + cpu_usage: float | None = None class TaskListResponse(BaseModel): """Response model for task listing.""" - tasks: List[TaskResponse] + + tasks: list[TaskResponse] total: int page: int page_size: int @@ -97,24 +106,23 @@ async def create_task( db: DatabaseManager = Depends(get_db_manager), ) -> TaskResponse: """Create a new memory-break evaluation task.""" - + logger.info(f"Creating task for PR: {request.pr_url}") - + # Validate agents are available if not validate_agent_list(request.agents): raise HTTPException( - status_code=400, - detail="One or more requested agents are not available" + status_code=400, detail="One or more requested agents are not available" ) - + # Parse GitHub PR URL pr_info = _parse_github_pr_url(str(request.pr_url)) if not pr_info: raise HTTPException( status_code=400, - detail="Invalid GitHub PR URL format. Expected: https://github.com/owner/repo/pull/number" + detail="Invalid GitHub PR URL format. 
Expected: https://github.com/owner/repo/pull/number", ) - + try: # Create task in database task_data = { @@ -126,9 +134,9 @@ async def create_task( "status": TaskStatus.QUEUED, "max_files": request.max_files, } - + task_db = db.create_task(task_data) - + # Create agent runs for agent in request.agents: agent_run_data = { @@ -137,32 +145,37 @@ async def create_task( "status": AgentRunStatus.QUEUED.value, # Convert enum to string for database } db.create_agent_run(agent_run_data) - + # Automatically enqueue task for simple processing try: # Update task status to running db.update_task(task_db.id, {"status": TaskStatus.RUNNING.value}) - + # Enqueue simple task processing job result = process_task_simple(str(task_db.id)) - - logger.info(f"Created and auto-enqueued task {task_db.id} for simple processing: {result}") - + + logger.info( + f"Created and auto-enqueued task {task_db.id} for simple processing: {result}" + ) + # Get updated task with running status updated_task = db.get_task(task_db.id) return _task_db_to_response(updated_task) - + except Exception as e: logger.error(f"Failed to enqueue task {task_db.id}: {e}") # Reset task status on failure - db.update_task(task_db.id, { - "status": TaskStatus.ERROR.value, - "error_message": f"Failed to enqueue: {str(e)}" - }) + db.update_task( + task_db.id, + { + "status": TaskStatus.ERROR.value, + "error_message": f"Failed to enqueue: {e!s}", + }, + ) # Return the task even if enqueueing failed updated_task = db.get_task(task_db.id) return _task_db_to_response(updated_task) - + except Exception as e: logger.error(f"Failed to create task: {e}") raise HTTPException(status_code=500, detail="Failed to create task") @@ -172,28 +185,27 @@ async def create_task( async def list_tasks( page: int = 1, page_size: int = 20, - status: Optional[TaskStatus] = None, + status: TaskStatus | None = None, db: DatabaseManager = Depends(get_db_manager), ) -> TaskListResponse: """List tasks with pagination and optional filtering.""" - + try: # Convert enum to string for database query status_str = status.value if status else None - + # Get tasks from database - tasks_db, total = db.list_tasks(page=page, page_size=page_size, status=status_str) - + tasks_db, total = db.list_tasks( + page=page, page_size=page_size, status=status_str + ) + # Convert to response models tasks = [_task_db_to_response(task) for task in tasks_db] - + return TaskListResponse( - tasks=tasks, - total=total, - page=page, - page_size=page_size + tasks=tasks, total=total, page=page, page_size=page_size ) - + except Exception as e: logger.error(f"Failed to list tasks: {e}") raise HTTPException(status_code=500, detail="Failed to list tasks") @@ -205,11 +217,11 @@ async def get_task( db: DatabaseManager = Depends(get_db_manager), ) -> TaskResponse: """Get task by ID.""" - + task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + return _task_db_to_response(task_db) @@ -219,55 +231,54 @@ async def run_task( db: DatabaseManager = Depends(get_db_manager), ) -> TaskResponse: """Start execution of a task.""" - + task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + if task_db.status != TaskStatus.QUEUED: raise HTTPException( - status_code=400, - detail=f"Task cannot be run in status: {task_db.status}" + status_code=400, detail=f"Task cannot be run in status: {task_db.status}" ) - + try: # Update task status to running db.update_task(task_id, {"status": TaskStatus.RUNNING}) - + # Enqueue simple 
task processing job result = process_task_simple(str(task_id)) - + logger.info(f"Enqueued task {task_id} for simple processing: {result}") - + # Return updated task updated_task = db.get_task(task_id) return _task_db_to_response(updated_task) - + except Exception as e: logger.error(f"Failed to start task {task_id}: {e}") # Reset task status on failure - db.update_task(task_id, { - "status": TaskStatus.ERROR, - "error_message": f"Failed to start: {str(e)}" - }) + db.update_task( + task_id, + {"status": TaskStatus.ERROR, "error_message": f"Failed to start: {e!s}"}, + ) raise HTTPException(status_code=500, detail="Failed to start task") -@router.get("/{task_id}/agents", response_model=List[AgentRunResponse]) +@router.get("/{task_id}/agents", response_model=list[AgentRunResponse]) async def get_task_agents( task_id: UUID, db: DatabaseManager = Depends(get_db_manager), -) -> List[AgentRunResponse]: +) -> list[AgentRunResponse]: """Get all agent runs for a task.""" - + # Verify task exists task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + # Get agent runs agent_runs = db.get_agent_runs_for_task(task_id) - + return [_agent_run_db_to_response(run) for run in agent_runs] @@ -278,22 +289,21 @@ async def get_task_agent( db: DatabaseManager = Depends(get_db_manager), ) -> AgentRunResponse: """Get specific agent run for a task.""" - + # Verify task exists task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + # Get agent runs and find the specific one agent_runs = db.get_agent_runs_for_task(task_id) agent_run = next((run for run in agent_runs if run.agent == agent), None) - + if not agent_run: raise HTTPException( - status_code=404, - detail=f"Agent run for {agent.value} not found" + status_code=404, detail=f"Agent run for {agent.value} not found" ) - + return _agent_run_db_to_response(agent_run) @@ -302,13 +312,13 @@ async def update_task_status( task_id: UUID, status_data: StatusUpdateRequest, db: DatabaseManager = Depends(get_db_manager), -) -> Dict[str, str]: +) -> dict[str, str]: """Update task status from agent container.""" - + task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + try: # Log the status update with all details logger.info( @@ -316,13 +326,13 @@ async def update_task_status( f"{status_data.status} - {status_data.message} " f"(progress: {status_data.progress}, mem: {status_data.memory_usage}MB, cpu: {status_data.cpu_usage}%)" ) - + # Optionally update task status if provided - if status_data.status in ['RUNNING', 'DONE', 'ERROR']: + if status_data.status in ["RUNNING", "DONE", "ERROR"]: db.update_task(task_id, {"status": status_data.status}) - + return {"message": "Status updated successfully"} - + except Exception as e: logger.error(f"Failed to update task status {task_id}: {e}") raise HTTPException(status_code=500, detail="Failed to update status") @@ -332,55 +342,54 @@ async def update_task_status( async def cancel_task( task_id: UUID, db: DatabaseManager = Depends(get_db_manager), -) -> Dict[str, str]: +) -> dict[str, str]: """Cancel a running task.""" - + task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + if task_db.status in [TaskStatus.DONE, TaskStatus.ERROR]: raise HTTPException( - status_code=400, - detail="Task is already completed and cannot be cancelled" + status_code=400, detail="Task is already completed and cannot be cancelled" ) 
- + try: # Update task status (simple worker will check this and stop) - db.update_task(task_id, { - "status": TaskStatus.ERROR, - "error_message": "Task cancelled by user" - }) - + db.update_task( + task_id, + {"status": TaskStatus.ERROR, "error_message": "Task cancelled by user"}, + ) + # Update agent runs agent_runs = db.get_agent_runs_for_task(task_id) for run in agent_runs: if run.status in [AgentRunStatus.QUEUED, AgentRunStatus.RUNNING]: - db.update_agent_run(run.id, { - "status": AgentRunStatus.ERROR, - "error_message": "Task cancelled" - }) - + db.update_agent_run( + run.id, + {"status": AgentRunStatus.ERROR, "error_message": "Task cancelled"}, + ) + logger.info(f"Cancelled task {task_id} - marked for termination") - + return {"message": "Task cancelled successfully"} - + except Exception as e: logger.error(f"Failed to cancel task {task_id}: {e}") raise HTTPException(status_code=500, detail="Failed to cancel task") # Helper functions -def _parse_github_pr_url(url: str) -> Optional[Dict[str, Any]]: +def _parse_github_pr_url(url: str) -> dict[str, Any] | None: """Parse GitHub PR URL to extract owner, repo, and PR number.""" import re - + pattern = r"https://github\.com/([^/]+)/([^/]+)/pull/(\d+)" match = re.match(pattern, url) - + if not match: return None - + return { "owner": match.group(1), "repo": match.group(2), @@ -420,8 +429,12 @@ def _agent_run_db_to_response(agent_run_db) -> AgentRunResponse: stats=agent_run_db.stats, created_at=agent_run_db.created_at.isoformat(), updated_at=agent_run_db.updated_at.isoformat(), - started_at=agent_run_db.started_at.isoformat() if agent_run_db.started_at else None, - completed_at=agent_run_db.completed_at.isoformat() if agent_run_db.completed_at else None, + started_at=agent_run_db.started_at.isoformat() + if agent_run_db.started_at + else None, + completed_at=agent_run_db.completed_at.isoformat() + if agent_run_db.completed_at + else None, error_message=agent_run_db.error_message, retry_count=agent_run_db.retry_count, ) @@ -431,17 +444,17 @@ def _agent_run_db_to_response(agent_run_db) -> AgentRunResponse: async def get_task_comparison( task_id: UUID, db: DatabaseManager = Depends(get_db_manager), -) -> Dict[str, Any]: +) -> dict[str, Any]: """Get side-by-side comparison of all agents for a task.""" - + # Verify task exists task_db = db.get_task(task_id) if not task_db: raise HTTPException(status_code=404, detail="Task not found") - + # Get all agent runs agent_runs = db.get_agent_runs_for_task(task_id) - + comparison = { "task_id": str(task_id), "pr_url": task_db.pr_url, @@ -453,10 +466,10 @@ async def get_task_comparison( "total_agents": len(agent_runs), "completed": 0, "failed": 0, - "running": 0 - } + "running": 0, + }, } - + for run in agent_runs: agent_data = { "agent": run.agent, @@ -467,19 +480,22 @@ async def get_task_comparison( "started_at": run.started_at.isoformat() if run.started_at else None, "completed_at": run.completed_at.isoformat() if run.completed_at else None, } - + # Extract key metrics from stats stats = run.stats or {} agent_data["metrics"] = { - "total_tokens": int(stats.get("total_tokens", stats.get("total_tokens_estimate", 0))), + "total_tokens": int( + stats.get("total_tokens", stats.get("total_tokens_estimate", 0)) + ), "hit_limit": stats.get("hit_limit", "false") == "true", - "compression_detected": stats.get("compression_detected", "false") == "true", + "compression_detected": stats.get("compression_detected", "false") + == "true", "deep_dive_iterations": int(stats.get("deep_dive_iterations", 0)), - 
"detection_method": stats.get("detection_method", "unknown") + "detection_method": stats.get("detection_method", "unknown"), } - + comparison["agents"][run.agent] = agent_data - + # Update summary counts if run.status == "done": comparison["summary"]["completed"] += 1 @@ -487,15 +503,15 @@ async def get_task_comparison( comparison["summary"]["failed"] += 1 elif run.status in ["running", "queued"]: comparison["summary"]["running"] += 1 - + # Calculate winner (agent that handled memory best) winner = None best_score = -1 - + for agent_name, agent_data in comparison["agents"].items(): if agent_data["status"] != "done": continue - + metrics = agent_data["metrics"] # Score based on: completed iterations, handled limit gracefully, compression worked score = 0 @@ -504,48 +520,49 @@ async def get_task_comparison( score += 50 # Bonus for compression if not metrics["hit_limit"]: score += 20 # Bonus for staying under limit - + if score > best_score: best_score = score winner = agent_name - + comparison["winner"] = winner comparison["best_score"] = best_score - + return comparison @router.get("/{task_id}/leaderboard") async def get_task_leaderboard( - task_id: str, - session: Session = Depends(get_session) -) -> Dict[str, Any]: + task_id: str, session: Session = Depends(get_session) +) -> dict[str, Any]: """Get leaderboard for a specific task showing agent comparison.""" - + db = DatabaseManager(session) - + # Get all agent runs for the task agent_runs = db.get_agent_runs_for_task(UUID(task_id)) - + if not agent_runs: - raise HTTPException(status_code=404, detail="Task not found or no agent runs available") - + raise HTTPException( + status_code=404, detail="Task not found or no agent runs available" + ) + leaderboard_data = [] - + for run in agent_runs: # Get scores for this agent run scores = db.get_scores_for_agent_run(run.id) - + # Calculate execution time execution_time = 0.0 if run.completed_at and run.started_at: execution_time = (run.completed_at - run.started_at).total_seconds() elif run.updated_at and run.created_at: execution_time = (run.updated_at - run.created_at).total_seconds() - + leaderboard_entry = { - "agent": run.agent.value if hasattr(run.agent, 'value') else run.agent, - "status": run.status.value if hasattr(run.status, 'value') else run.status, + "agent": run.agent.value if hasattr(run.agent, "value") else run.agent, + "status": run.status.value if hasattr(run.status, "value") else run.status, "overall_score": scores.overall_score if scores else 0.0, "scores": { "AR": scores.scores.get("AR", 0.0) if scores else 0.0, @@ -555,14 +572,13 @@ async def get_task_leaderboard( }, "passed": scores.passed if scores else False, "execution_time": execution_time, - "compression_detected": run.stats.get("compression_detected", False) if run.stats else False + "compression_detected": run.stats.get("compression_detected", False) + if run.stats + else False, } leaderboard_data.append(leaderboard_entry) - + # Sort by overall score (descending) leaderboard_data.sort(key=lambda x: x["overall_score"], reverse=True) - - return { - "task_id": task_id, - "leaderboard": leaderboard_data - } + + return {"task_id": task_id, "leaderboard": leaderboard_data} diff --git a/app/services/judge_service.py b/app/services/judge_service.py index 2a02311..d137371 100644 --- a/app/services/judge_service.py +++ b/app/services/judge_service.py @@ -4,10 +4,9 @@ import logging import re from abc import ABC, abstractmethod -from typing import Dict, List, Optional, Tuple -import openai import anthropic +import openai from 
app.config import settings from app.domain.entities import RubricDimension @@ -17,55 +16,53 @@ class JudgeError(Exception): """Base exception for judge errors.""" - pass class Judge(ABC): """Abstract base class for judges.""" - + @abstractmethod def evaluate( self, - questions: List[str], - pre_compression_answers: List[str], - post_compression_answers: List[str], - rubric: List[RubricDimension], - ) -> Tuple[Dict[RubricDimension, float], str]: + questions: list[str], + pre_compression_answers: list[str], + post_compression_answers: list[str], + rubric: list[RubricDimension], + ) -> tuple[dict[RubricDimension, float], str]: """ Evaluate agent performance. - + Args: questions: List of evaluation questions pre_compression_answers: Agent answers before compression post_compression_answers: Agent answers after compression rubric: List of rubric dimensions to evaluate - + Returns: Tuple of (scores_dict, rationale_string) """ - pass class HeuristicJudge(Judge): """Heuristic-based judge using keyword matching and analysis.""" - + def __init__(self): self.logger = logging.getLogger("judge.heuristic") - + def evaluate( self, - questions: List[str], - pre_compression_answers: List[str], - post_compression_answers: List[str], - rubric: List[RubricDimension], - ) -> Tuple[Dict[RubricDimension, float], str]: + questions: list[str], + pre_compression_answers: list[str], + post_compression_answers: list[str], + rubric: list[RubricDimension], + ) -> tuple[dict[RubricDimension, float], str]: """Evaluate using heuristic methods.""" - + self.logger.info(f"Evaluating with heuristic judge: {len(questions)} questions") - + scores = {} rationale_parts = [] - + # Analyze each rubric dimension for dimension in rubric: score, rationale = self._evaluate_dimension( @@ -73,47 +70,67 @@ def evaluate( ) scores[dimension] = score rationale_parts.append(f"{dimension.value}: {rationale}") - + combined_rationale = "\n".join(rationale_parts) - + self.logger.info(f"Heuristic evaluation complete: {scores}") return scores, combined_rationale - + def _evaluate_dimension( self, dimension: RubricDimension, - questions: List[str], - pre_answers: List[str], - post_answers: List[str], - ) -> Tuple[float, str]: + questions: list[str], + pre_answers: list[str], + post_answers: list[str], + ) -> tuple[float, str]: """Evaluate a specific rubric dimension.""" - + if dimension == RubricDimension.AR: # Accurate Retrieval - return self._evaluate_accurate_retrieval(questions, pre_answers, post_answers) - elif dimension == RubricDimension.TTL: # Test-Time Learning - return self._evaluate_test_time_learning(questions, pre_answers, post_answers) - elif dimension == RubricDimension.LRU: # Long-Range Understanding - return self._evaluate_long_range_understanding(questions, pre_answers, post_answers) - elif dimension == RubricDimension.SF: # Selective Forgetting - return self._evaluate_selective_forgetting(questions, pre_answers, post_answers) - else: - return 0.5, f"Unknown dimension: {dimension}" - + return self._evaluate_accurate_retrieval( + questions, pre_answers, post_answers + ) + if dimension == RubricDimension.TTL: # Test-Time Learning + return self._evaluate_test_time_learning( + questions, pre_answers, post_answers + ) + if dimension == RubricDimension.LRU: # Long-Range Understanding + return self._evaluate_long_range_understanding( + questions, pre_answers, post_answers + ) + if dimension == RubricDimension.SF: # Selective Forgetting + return self._evaluate_selective_forgetting( + questions, pre_answers, post_answers + ) + return 0.5, 
f"Unknown dimension: {dimension}" + def _evaluate_accurate_retrieval( - self, questions: List[str], pre_answers: List[str], post_answers: List[str] - ) -> Tuple[float, str]: + self, questions: list[str], pre_answers: list[str], post_answers: list[str] + ) -> tuple[float, str]: """Evaluate accurate retrieval capability.""" - + # Look for specific details that should be retained detail_keywords = [ - "function", "method", "class", "variable", "file", "import", - "def ", "async ", "await", "return", "raise", "except", - "if ", "for ", "while ", "with " + "function", + "method", + "class", + "variable", + "file", + "import", + "def ", + "async ", + "await", + "return", + "raise", + "except", + "if ", + "for ", + "while ", + "with ", ] - + pre_details = self._count_keywords(pre_answers, detail_keywords) post_details = self._count_keywords(post_answers, detail_keywords) - + # Score based on retention of specific details if pre_details == 0: score = 0.5 # No baseline details @@ -122,23 +139,33 @@ def _evaluate_accurate_retrieval( retention_ratio = min(post_details / pre_details, 1.0) score = 0.3 + (0.7 * retention_ratio) # Base 0.3, up to 1.0 rationale = f"Retained {post_details}/{pre_details} specific details ({retention_ratio:.2%})" - + return score, rationale - + def _evaluate_test_time_learning( - self, questions: List[str], pre_answers: List[str], post_answers: List[str] - ) -> Tuple[float, str]: + self, questions: list[str], pre_answers: list[str], post_answers: list[str] + ) -> tuple[float, str]: """Evaluate test-time learning and adaptation.""" - + # Look for learning indicators learning_keywords = [ - "would", "could", "should", "approach", "strategy", "implement", - "similar", "adapt", "modify", "improve", "optimize", "consider" + "would", + "could", + "should", + "approach", + "strategy", + "implement", + "similar", + "adapt", + "modify", + "improve", + "optimize", + "consider", ] - + learning_indicators = self._count_keywords(post_answers, learning_keywords) total_words = len(" ".join(post_answers).split()) - + if total_words == 0: score = 0.0 rationale = "No answers provided" @@ -146,65 +173,108 @@ def _evaluate_test_time_learning( learning_density = learning_indicators / max(total_words, 1) * 100 score = min(learning_density / 2, 1.0) # Normalize to 0-1 range rationale = f"Learning indicators: {learning_indicators} in {total_words} words ({learning_density:.1f}%)" - + return score, rationale - + def _evaluate_long_range_understanding( - self, questions: List[str], pre_answers: List[str], post_answers: List[str] - ) -> Tuple[float, str]: + self, questions: list[str], pre_answers: list[str], post_answers: list[str] + ) -> tuple[float, str]: """Evaluate long-range understanding and connections.""" - + # Look for connection indicators connection_keywords = [ - "connect", "relate", "depend", "impact", "affect", "integrate", - "system", "architecture", "component", "module", "service", - "because", "therefore", "however", "moreover", "furthermore" + "connect", + "relate", + "depend", + "impact", + "affect", + "integrate", + "system", + "architecture", + "component", + "module", + "service", + "because", + "therefore", + "however", + "moreover", + "furthermore", ] - + connections = self._count_keywords(post_answers, connection_keywords) - + # Look for architectural terms arch_keywords = [ - "pattern", "design", "structure", "framework", "library", - "database", "api", "interface", "protocol", "service" + "pattern", + "design", + "structure", + "framework", + "library", + "database", + 
"api", + "interface", + "protocol", + "service", ] - + arch_terms = self._count_keywords(post_answers, arch_keywords) - + total_indicators = connections + arch_terms score = min(total_indicators / 10, 1.0) # Normalize to 0-1 - rationale = f"Connection indicators: {connections}, Architecture terms: {arch_terms}" - + rationale = ( + f"Connection indicators: {connections}, Architecture terms: {arch_terms}" + ) + return score, rationale - + def _evaluate_selective_forgetting( - self, questions: List[str], pre_answers: List[str], post_answers: List[str] - ) -> Tuple[float, str]: + self, questions: list[str], pre_answers: list[str], post_answers: list[str] + ) -> tuple[float, str]: """Evaluate selective forgetting and updating.""" - + # Look for update/change indicators change_keywords = [ - "change", "update", "modify", "replace", "remove", "delete", - "revert", "undo", "preserve", "keep", "maintain", "retain" + "change", + "update", + "modify", + "replace", + "remove", + "delete", + "revert", + "undo", + "preserve", + "keep", + "maintain", + "retain", ] - + change_indicators = self._count_keywords(post_answers, change_keywords) - + # Look for conditional reasoning conditional_keywords = [ - "if", "when", "unless", "provided", "assuming", "given", - "depends", "varies", "different", "alternative" + "if", + "when", + "unless", + "provided", + "assuming", + "given", + "depends", + "varies", + "different", + "alternative", ] - + conditionals = self._count_keywords(post_answers, conditional_keywords) - + total_indicators = change_indicators + conditionals score = min(total_indicators / 8, 1.0) # Normalize to 0-1 - rationale = f"Change indicators: {change_indicators}, Conditionals: {conditionals}" - + rationale = ( + f"Change indicators: {change_indicators}, Conditionals: {conditionals}" + ) + return score, rationale - - def _count_keywords(self, texts: List[str], keywords: List[str]) -> int: + + def _count_keywords(self, texts: list[str], keywords: list[str]) -> int: """Count keyword occurrences in texts.""" combined_text = " ".join(texts).lower() count = 0 @@ -215,46 +285,50 @@ def _count_keywords(self, texts: List[str], keywords: List[str]) -> int: class LLMJudge(Judge): """LLM-based judge using external language models.""" - + def __init__(self, model: str = None, provider: str = "openai"): self.model = model if model is not None else settings.judge_model self.provider = provider.lower() self.logger = logging.getLogger("judge.llm") - + # Initialize API clients if self.provider == "openai" and settings.openai_api_key: self.openai_client = openai.OpenAI(api_key=settings.openai_api_key) elif self.provider == "anthropic" and settings.anthropic_api_key: - self.anthropic_client = anthropic.Anthropic(api_key=settings.anthropic_api_key) + self.anthropic_client = anthropic.Anthropic( + api_key=settings.anthropic_api_key + ) else: self.logger.warning(f"No API key configured for {provider}") - + def evaluate( self, - questions: List[str], - pre_compression_answers: List[str], - post_compression_answers: List[str], - rubric: List[RubricDimension], - ) -> Tuple[Dict[RubricDimension, float], str]: + questions: list[str], + pre_compression_answers: list[str], + post_compression_answers: list[str], + rubric: list[RubricDimension], + ) -> tuple[dict[RubricDimension, float], str]: """Evaluate using LLM judge.""" - - self.logger.info(f"Evaluating with LLM judge ({self.model}): {len(questions)} questions") - + + self.logger.info( + f"Evaluating with LLM judge ({self.model}): {len(questions)} questions" + ) + # 
Create evaluation prompt evaluation_prompt = self._build_evaluation_prompt( questions, pre_compression_answers, post_compression_answers, rubric ) - + try: # Get LLM response response = self._query_llm(evaluation_prompt) - + # Parse response scores, rationale = self._parse_llm_response(response, rubric) - + self.logger.info(f"LLM evaluation complete: {scores}") return scores, rationale - + except Exception as e: self.logger.error(f"LLM evaluation failed: {e}") # Fallback to heuristic judge @@ -264,23 +338,23 @@ def evaluate( ) rationale = f"LLM evaluation failed, used heuristic fallback: {rationale}" return scores, rationale - + def _build_evaluation_prompt( self, - questions: List[str], - pre_answers: List[str], - post_answers: List[str], - rubric: List[RubricDimension], + questions: list[str], + pre_answers: list[str], + post_answers: list[str], + rubric: list[RubricDimension], ) -> str: """Build evaluation prompt for LLM.""" - + rubric_descriptions = { RubricDimension.AR: "Accurate Retrieval - How well can the agent recall specific details and facts?", RubricDimension.TTL: "Test-Time Learning - How well can the agent adapt and apply knowledge to new scenarios?", RubricDimension.LRU: "Long-Range Understanding - How well can the agent understand connections and broader context?", RubricDimension.SF: "Selective Forgetting - How well can the agent update/modify its understanding when needed?", } - + prompt_parts = [ "# AI Agent Memory-Break Evaluation", "", @@ -288,66 +362,78 @@ def _build_evaluation_prompt( "", "## Rubric Dimensions:", ] - + for dim in rubric: - prompt_parts.append(f"- **{dim.value}**: {rubric_descriptions.get(dim, 'Unknown dimension')}") - - prompt_parts.extend([ - "", - "## Questions and Answers:", - "", - ]) - + prompt_parts.append( + f"- **{dim.value}**: {rubric_descriptions.get(dim, 'Unknown dimension')}" + ) + + prompt_parts.extend( + [ + "", + "## Questions and Answers:", + "", + ] + ) + for i, question in enumerate(questions, 1): - pre_answer = pre_answers[i-1] if i-1 < len(pre_answers) else "No answer" - post_answer = post_answers[i-1] if i-1 < len(post_answers) else "No answer" - - prompt_parts.extend([ - f"### Question {i}: {question}", + pre_answer = pre_answers[i - 1] if i - 1 < len(pre_answers) else "No answer" + post_answer = ( + post_answers[i - 1] if i - 1 < len(post_answers) else "No answer" + ) + + prompt_parts.extend( + [ + f"### Question {i}: {question}", + "", + f"**Pre-compression answer**: {pre_answer}", + "", + f"**Post-compression answer**: {post_answer}", + "", + ] + ) + + prompt_parts.extend( + [ + "## Instructions:", "", - f"**Pre-compression answer**: {pre_answer}", + "Please evaluate the agent's performance on each rubric dimension by comparing the pre and post compression answers.", "", - f"**Post-compression answer**: {post_answer}", + "**CRITICAL: You must provide a numeric score between 0.0 and 1.0 for each dimension:**", + "- 0.0 = Complete failure or no understanding", + "- 0.3 = Poor performance with major gaps", + "- 0.5 = Moderate performance with some issues", + "- 0.7 = Good performance with minor issues", + "- 1.0 = Excellent performance, maintains quality", "", - ]) - - prompt_parts.extend([ - "## Instructions:", - "", - "Please evaluate the agent's performance on each rubric dimension by comparing the pre and post compression answers.", - "", - "**CRITICAL: You must provide a numeric score between 0.0 and 1.0 for each dimension:**", - "- 0.0 = Complete failure or no understanding", - "- 0.3 = Poor performance with major gaps", - 
"- 0.5 = Moderate performance with some issues", - "- 0.7 = Good performance with minor issues", - "- 1.0 = Excellent performance, maintains quality", - "", - "Consider how well the agent maintained its understanding and ability to answer after memory compression.", - "", - "**You MUST format your response as valid JSON with numeric scores:**", - "```json", - "{", - ' "scores": {', - ]) - + "Consider how well the agent maintained its understanding and ability to answer after memory compression.", + "", + "**You MUST format your response as valid JSON with numeric scores:**", + "```json", + "{", + ' "scores": {', + ] + ) + for i, dim in enumerate(rubric): comma = "," if i < len(rubric) - 1 else "" prompt_parts.append(f' "{dim.value}": 0.0{comma}') - - prompt_parts.extend([ - ' },', - ' "rationale": "Detailed explanation of the scoring..."', - "}", - "```", - ]) - + + prompt_parts.extend( + [ + " },", + ' "rationale": "Detailed explanation of the scoring..."', + "}", + "```", + ] + ) + return "\n".join(prompt_parts) - + def _query_llm(self, prompt: str) -> str: """Query the configured LLM.""" - - if self.provider == "openai" and hasattr(self, 'openai_client'): + + if self.provider == "openai" and hasattr(self, "openai_client"): # Try with temperature first, fallback without it if model doesn't support it try: response = self.openai_client.chat.completions.create( @@ -359,119 +445,120 @@ def _query_llm(self, prompt: str) -> str: return response.choices[0].message.content except Exception as e: if "temperature" in str(e).lower(): - self.logger.warning(f"Model {self.model} doesn't support temperature parameter, using default") + self.logger.warning( + f"Model {self.model} doesn't support temperature parameter, using default" + ) response = self.openai_client.chat.completions.create( model=self.model, messages=[{"role": "user", "content": prompt}], max_tokens=2000, ) return response.choices[0].message.content - else: - raise - - elif self.provider == "anthropic" and hasattr(self, 'anthropic_client'): + raise + + elif self.provider == "anthropic" and hasattr(self, "anthropic_client"): response = self.anthropic_client.messages.create( model=self.model, max_tokens=2000, temperature=0.1, - messages=[{"role": "user", "content": prompt}] + messages=[{"role": "user", "content": prompt}], ) return response.content[0].text - + else: raise JudgeError(f"No configured client for {self.provider}") - + def _parse_llm_response( - self, response: str, rubric: List[RubricDimension] - ) -> Tuple[Dict[RubricDimension, float], str]: + self, response: str, rubric: list[RubricDimension] + ) -> tuple[dict[RubricDimension, float], str]: """Parse LLM response to extract scores and rationale.""" - + try: # Extract JSON from response - json_match = re.search(r'```json\s*(\{.*?\})\s*```', response, re.DOTALL) + json_match = re.search(r"```json\s*(\{.*?\})\s*```", response, re.DOTALL) if not json_match: # Try to find JSON without markdown - json_match = re.search(r'\{.*\}', response, re.DOTALL) - + json_match = re.search(r"\{.*\}", response, re.DOTALL) + if not json_match: raise ValueError("No JSON found in response") - + json_data = json.loads(json_match.group(1)) - + # Extract scores scores_data = json_data.get("scores", {}) scores = {} - + for dim in rubric: if dim.value in scores_data: score = float(scores_data[dim.value]) scores[dim] = max(0.0, min(1.0, score)) # Clamp to [0,1] else: scores[dim] = 0.5 # Default score - + rationale = json_data.get("rationale", "No rationale provided") - + return scores, rationale - + 
except Exception as e: self.logger.error(f"Failed to parse LLM response: {e}") # Return default scores - scores = {dim: 0.5 for dim in rubric} - rationale = f"Failed to parse LLM response: {str(e)}" + scores = dict.fromkeys(rubric, 0.5) + rationale = f"Failed to parse LLM response: {e!s}" return scores, rationale class JudgeService: """Service for managing different judge types.""" - + def __init__(self): self.logger = logging.getLogger("services.judge") self._judges = { "heuristic": HeuristicJudge(), "llm": LLMJudge( model=settings.judge_model, - provider="openai" if settings.openai_api_key else "anthropic" + provider="openai" if settings.openai_api_key else "anthropic", ), } - + def get_judge(self, judge_type: str = None) -> Judge: """Get judge instance by type.""" if judge_type is None: judge_type = settings.default_judge - + if judge_type not in self._judges: self.logger.warning(f"Unknown judge type: {judge_type}, using heuristic") judge_type = "heuristic" - + return self._judges[judge_type] - + def evaluate_agent_performance( self, - questions: List[str], - pre_compression_answers: List[str], - post_compression_answers: List[str], - rubric: List[RubricDimension], + questions: list[str], + pre_compression_answers: list[str], + post_compression_answers: list[str], + rubric: list[RubricDimension], judge_type: str = None, - ) -> Tuple[Dict[RubricDimension, float], str, str]: + ) -> tuple[dict[RubricDimension, float], str, str]: """ Evaluate agent performance using specified judge. - + Returns: Tuple of (scores, rationale, judge_type_used) """ - + judge = self.get_judge(judge_type) judge_type_used = judge_type or settings.default_judge - + self.logger.info(f"Evaluating agent performance with {judge_type_used} judge") - + scores, rationale = judge.evaluate( questions, pre_compression_answers, post_compression_answers, rubric ) - + return scores, rationale, judge_type_used - - def get_available_judges(self) -> List[str]: + + def get_available_judges(self) -> list[str]: """Get list of available judge types.""" return list(self._judges.keys()) @@ -483,8 +570,8 @@ def get_available_judges(self) -> List[str]: def get_judge_service() -> JudgeService: """Get global judge service instance.""" global _judge_service - + if _judge_service is None: _judge_service = JudgeService() - + return _judge_service diff --git a/app/services/pr_service.py b/app/services/pr_service.py index 761bba2..b98bd57 100644 --- a/app/services/pr_service.py +++ b/app/services/pr_service.py @@ -2,13 +2,9 @@ import logging import shutil -import subprocess from pathlib import Path -from typing import Dict, List, Optional, Tuple -from urllib.parse import urlparse -import git -from git import Repo, GitCommandError +from git import GitCommandError, Repo from app.config import settings @@ -17,14 +13,14 @@ class PRAnalysisResult: """Result of PR analysis containing repo info and changed files.""" - + def __init__( self, repo_path: Path, owner: str, repo_name: str, pr_number: int, - changed_files: List[str], + changed_files: list[str], base_branch: str, head_branch: str, commit_sha: str, @@ -37,35 +33,32 @@ def __init__( self.base_branch = base_branch self.head_branch = head_branch self.commit_sha = commit_sha - + @property def repo_full_name(self) -> str: """Get full repository name.""" return f"{self.owner}/{self.repo_name}" - + def __str__(self) -> str: return f"PR {self.pr_number} in {self.repo_full_name}: {len(self.changed_files)} files changed" class PRServiceError(Exception): """Base exception for PR service errors.""" - pass 
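# The subclasses below specialize PRServiceError, so callers that do not care
# which stage failed can catch the base class alone. A minimal usage sketch
# (pr_url and task_id stand in for real values):
#
#     try:
#         result = PRService().process_pr(pr_url, task_id)
#     except PRCloneError:
#         ...  # clone failed: network, auth, or missing repository
#     except PRServiceError:
#         ...  # any other PR processing failure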
class PRCloneError(PRServiceError): """Error during repository cloning.""" - pass class PRAnalysisError(PRServiceError): """Error during PR analysis.""" - pass class PRService: """Service for handling GitHub Pull Request operations.""" - - def __init__(self, run_root: Optional[str] = None): + + def __init__(self, run_root: str | None = None): # Ensure run_root is always a Path object if run_root: self.run_root = Path(str(run_root)).expanduser() @@ -73,52 +66,52 @@ def __init__(self, run_root: Optional[str] = None): self.run_root = Path(str(settings.run_root)).expanduser() self.max_files = settings.max_files_per_task self.logger = logging.getLogger("services.pr") - + def process_pr(self, pr_url: str, task_id: str) -> PRAnalysisResult: """ Process a GitHub PR: clone repo and analyze changes. - + Args: pr_url: GitHub PR URL (https://github.com/owner/repo/pull/number) task_id: Unique task identifier - + Returns: PRAnalysisResult containing master repo path and changed files - + Raises: PRServiceError: If PR processing fails """ self.logger.info(f"Processing PR: {pr_url}") - + # Parse PR URL pr_info = self._parse_pr_url(pr_url) if not pr_info: raise PRServiceError(f"Invalid GitHub PR URL: {pr_url}") - + owner, repo_name, pr_number = pr_info - + # Create task workspace with master repo task_dir = self.run_root / task_id pr_dir = task_dir / "pr" master_repo_path = pr_dir / "master" / repo_name - + try: # Clone repository to master location self._clone_repository(owner, repo_name, master_repo_path) - + # Analyze PR changes - changed_files, base_branch, head_branch, commit_sha = self._analyze_pr_changes( - master_repo_path, pr_number + changed_files, base_branch, head_branch, commit_sha = ( + self._analyze_pr_changes(master_repo_path, pr_number) ) - + # Filter and validate files filtered_files = self._filter_changed_files(changed_files, master_repo_path) - + self.logger.info( f"Successfully processed PR {pr_number}: " f"{len(filtered_files)}/{len(changed_files)} files selected" ) - + return PRAnalysisResult( repo_path=master_repo_path, # This is the master copy owner=owner, @@ -129,7 +122,7 @@ def process_pr(self, pr_url: str, task_id: str) -> PRAnalysisResult: head_branch=head_branch, commit_sha=commit_sha, ) - + except Exception as e: self.logger.error(f"Failed to process PR {pr_url}: {e}") # Cleanup on failure @@ -139,57 +132,67 @@ def process_pr(self, pr_url: str, task_id: str) -> PRAnalysisResult: except Exception as cleanup_e: self.logger.warning(f"Failed to cleanup {task_dir}: {cleanup_e}") raise PRServiceError(f"PR processing failed: {e}") from e - - def create_agent_repo_copy(self, task_id: str, agent_name: str, agent_run_id: str, master_repo_path: Path) -> Path: + + def create_agent_repo_copy( + self, task_id: str, agent_name: str, agent_run_id: str, master_repo_path: Path + ) -> Path: """ Create an isolated repository copy for a specific agent. 
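
        The copy is made with shutil.copytree, skipping the .git directory,
        so each agent works on an isolated snapshot of the master checkout.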
- + Args: task_id: Task identifier agent_name: Name of the agent (iflow, claude, gemini) agent_run_id: Unique agent run identifier master_repo_path: Path to the master repository copy - + Returns: Path to the agent's isolated repository copy - + Raises: PRServiceError: If copying fails """ - + # Create agent-specific repository path task_dir = self.run_root / task_id agent_repo_path = task_dir / "agents" / agent_name / "repo" - + try: # Ensure parent directory exists agent_repo_path.parent.mkdir(parents=True, exist_ok=True) - + # Remove existing agent repo if it exists if agent_repo_path.exists(): shutil.rmtree(agent_repo_path) - + # Copy master repository to agent's workspace - self.logger.info(f"Creating isolated repo copy for {agent_name}: {agent_repo_path}") + self.logger.info( + f"Creating isolated repo copy for {agent_name}: {agent_repo_path}" + ) shutil.copytree( src=master_repo_path, dst=agent_repo_path, symlinks=False, - ignore=shutil.ignore_patterns('.git'), # Skip .git directory for efficiency - dirs_exist_ok=True + ignore=shutil.ignore_patterns( + ".git" + ), # Skip .git directory for efficiency + dirs_exist_ok=True, + ) + + self.logger.info( + f"Successfully created isolated repo for {agent_name}: {agent_repo_path}" ) - - self.logger.info(f"Successfully created isolated repo for {agent_name}: {agent_repo_path}") return agent_repo_path - + except Exception as e: self.logger.error(f"Failed to create agent repo copy for {agent_name}: {e}") - raise PRServiceError(f"Failed to create isolated repo for {agent_name}: {e}") + raise PRServiceError( + f"Failed to create isolated repo for {agent_name}: {e}" + ) def cleanup_task_workspace(self, task_id: str) -> None: """Clean up task workspace directory.""" task_dir = self.run_root / task_id - + if task_dir.exists(): try: shutil.rmtree(task_dir) @@ -198,81 +201,83 @@ def cleanup_task_workspace(self, task_id: str) -> None: self.logger.error(f"Failed to cleanup task workspace {task_dir}: {e}") else: self.logger.debug(f"Task workspace not found: {task_dir}") - - def get_file_content(self, repo_path: Path, file_path: str, encoding: str = "utf-8") -> str: + + def get_file_content( + self, repo_path: Path, file_path: str, encoding: str = "utf-8" + ) -> str: """Get content of a specific file from the repository.""" full_path = repo_path / file_path - + if not full_path.exists(): raise FileNotFoundError(f"File not found: {file_path}") - + if not full_path.is_file(): raise ValueError(f"Path is not a file: {file_path}") - + try: - with open(full_path, 'r', encoding=encoding) as f: + with open(full_path, encoding=encoding) as f: return f.read() except UnicodeDecodeError: # Try with different encoding if UTF-8 fails try: - with open(full_path, 'r', encoding='latin-1') as f: + with open(full_path, encoding="latin-1") as f: return f.read() except Exception as e: raise ValueError(f"Failed to read file {file_path}: {e}") - - def _parse_pr_url(self, pr_url: str) -> Optional[Tuple[str, str, int]]: + + def _parse_pr_url(self, pr_url: str) -> tuple[str, str, int] | None: """Parse GitHub PR URL to extract owner, repo, and PR number.""" import re - + # Match GitHub PR URL pattern pattern = r"https://github\.com/([^/]+)/([^/]+)/pull/(\d+)" match = re.match(pattern, pr_url.strip()) - + if not match: return None - + owner = match.group(1) repo_name = match.group(2) pr_number = int(match.group(3)) - + return owner, repo_name, pr_number - + def _clone_repository(self, owner: str, repo_name: str, repo_path: Path) -> None: """Clone GitHub repository to local path.""" 
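        # Clone strategy: a depth-1, single-branch clone keeps the checkout
        # small and fast. The PR refs needed for diffing are fetched
        # separately afterwards via the
        # "+refs/pull/*/head:refs/remotes/origin/pr/*" refspec; if that fetch
        # fails, PR analysis falls back to scanning recent commits.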
- + # Ensure repo_path is absolute if not repo_path.is_absolute(): repo_path = repo_path.resolve() - + # Create all parent directories repo_path.parent.mkdir(parents=True, exist_ok=True) - + # Verify parent directory was created if not repo_path.parent.exists(): raise PRCloneError(f"Failed to create parent directory: {repo_path.parent}") - + # Remove existing directory if it exists if repo_path.exists(): shutil.rmtree(repo_path) - + # GitHub repository URL repo_url = f"https://github.com/{owner}/{repo_name}.git" - + try: self.logger.info(f"Cloning repository: {repo_url}") - + # Clone with minimal history for efficiency repo = Repo.clone_from( repo_url, str(repo_path), depth=1, # Shallow clone single_branch=True, # Only default branch initially - progress=None # Disable progress reporting + progress=None, # Disable progress reporting ) - + # Fetch PR refs so we can access the PR origin = repo.remotes.origin - + # Fetch all refs to get PR information try: origin.fetch("+refs/pull/*/head:refs/remotes/origin/pr/*") @@ -280,38 +285,36 @@ def _clone_repository(self, owner: str, repo_name: str, repo_path: Path) -> None except GitCommandError as e: self.logger.warning(f"Failed to fetch PR refs: {e}") # Continue without PR refs - we'll try to get PR info differently - + self.logger.info(f"Successfully cloned {repo_url} to {repo_path}") - + except GitCommandError as e: self.logger.error(f"Git clone failed: {e}") raise PRCloneError(f"Failed to clone repository {repo_url}: {e}") except Exception as e: self.logger.error(f"Repository clone failed: {e}") raise PRCloneError(f"Failed to clone repository: {e}") - + def _analyze_pr_changes( - self, - repo_path: Path, - pr_number: int - ) -> Tuple[List[str], str, str, str]: + self, repo_path: Path, pr_number: int + ) -> tuple[list[str], str, str, str]: """ Analyze PR to find changed files. 
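
        Three detection methods are tried in order: fetched PR refs diffed
        against the detected default branch, a scan of recent commits, and
        finally an all-source-files fallback.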
- + Returns: Tuple of (changed_files, base_branch, head_branch, commit_sha) """ try: repo = Repo(str(repo_path)) - + # Detect the actual default branch (main, master, or others) base_branch = self._get_default_branch(repo) - + # Try to get PR information via GitHub API or git commands changed_files = [] head_branch = f"pr-{pr_number}" commit_sha = str(repo.head.commit) - + # Method 1: Try to use PR refs if available try: pr_ref = f"refs/remotes/origin/pr/{pr_number}" @@ -319,30 +322,34 @@ def _analyze_pr_changes( # Get diff between PR and detected base branch pr_commit = repo.refs[pr_ref].commit base_ref = f"refs/remotes/origin/{base_branch}" - + if base_ref in [ref.name for ref in repo.refs]: base_commit = repo.refs[base_ref].commit - + diff = base_commit.diff(pr_commit) changed_files = [item.a_path or item.b_path for item in diff] - + commit_sha = str(pr_commit) head_branch = f"pr-{pr_number}" - - self.logger.debug(f"Found {len(changed_files)} changed files via PR refs (base: {base_branch})") + + self.logger.debug( + f"Found {len(changed_files)} changed files via PR refs (base: {base_branch})" + ) else: - self.logger.warning(f"Base branch {base_branch} not found in refs, falling back") + self.logger.warning( + f"Base branch {base_branch} not found in refs, falling back" + ) raise Exception("Base branch not found") - + except Exception as e: self.logger.debug(f"PR refs method failed: {e}") - + # Method 2: Fallback to recent commits analysis try: # Get recent commits and their changes commits = list(repo.iter_commits(max_count=10)) all_changed_files = set() - + for commit in commits: if commit.parents: # Skip merge commits without parents diff = commit.parents[0].diff(commit) @@ -350,134 +357,225 @@ def _analyze_pr_changes( file_path = item.a_path or item.b_path if file_path: all_changed_files.add(file_path) - + changed_files = list(all_changed_files) - self.logger.debug(f"Found {len(changed_files)} changed files via commit analysis") - + self.logger.debug( + f"Found {len(changed_files)} changed files via commit analysis" + ) + except Exception as e2: self.logger.debug(f"Commit analysis failed: {e2}") - + # Method 3: Final fallback - use all source files changed_files = self._get_source_files(repo_path) - self.logger.warning(f"Using all source files as fallback: {len(changed_files)} files") - + self.logger.warning( + f"Using all source files as fallback: {len(changed_files)} files" + ) + # Ensure we have some files if not changed_files: changed_files = self._get_source_files(repo_path) self.logger.warning("No changed files found, using all source files") - + return changed_files, base_branch, head_branch, commit_sha - + except Exception as e: self.logger.error(f"PR analysis failed: {e}") raise PRAnalysisError(f"Failed to analyze PR {pr_number}: {e}") - - def _get_source_files(self, repo_path: Path) -> List[str]: + + def _get_source_files(self, repo_path: Path) -> list[str]: """Get all source code files in the repository as fallback.""" source_extensions = { - '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.cpp', '.c', '.h', - '.cs', '.php', '.rb', '.go', '.rs', '.kt', '.swift', '.scala', - '.sql', '.html', '.css', '.scss', '.less', '.vue', '.md', '.json', - '.yaml', '.yml', '.xml', '.toml', '.ini', '.cfg' + ".py", + ".js", + ".ts", + ".jsx", + ".tsx", + ".java", + ".cpp", + ".c", + ".h", + ".cs", + ".php", + ".rb", + ".go", + ".rs", + ".kt", + ".swift", + ".scala", + ".sql", + ".html", + ".css", + ".scss", + ".less", + ".vue", + ".md", + ".json", + ".yaml", + ".yml", + ".xml", + ".toml", + 
".ini", + ".cfg", } - + source_files = [] - - for file_path in repo_path.rglob('*'): - if (file_path.is_file() and - file_path.suffix.lower() in source_extensions and - not self._is_ignored_path(file_path, repo_path)): - + + for file_path in repo_path.rglob("*"): + if ( + file_path.is_file() + and file_path.suffix.lower() in source_extensions + and not self._is_ignored_path(file_path, repo_path) + ): relative_path = file_path.relative_to(repo_path) source_files.append(str(relative_path)) - + return source_files - - def _filter_changed_files(self, changed_files: List[str], repo_path: Path) -> List[str]: + + def _filter_changed_files( + self, changed_files: list[str], repo_path: Path + ) -> list[str]: """Filter and validate changed files.""" - + filtered_files = [] - + for file_path in changed_files: full_path = repo_path / file_path - + # Skip if file doesn't exist (might be deleted) if not full_path.exists() or not full_path.is_file(): continue - + # Skip ignored paths if self._is_ignored_path(full_path, repo_path): continue - + # Skip binary files if self._is_binary_file(full_path): continue - + # Skip very large files (>1MB) if full_path.stat().st_size > 1024 * 1024: self.logger.debug(f"Skipping large file: {file_path}") continue - + filtered_files.append(file_path) - + # Limit number of files if len(filtered_files) >= self.max_files: - self.logger.warning(f"Reached max files limit ({self.max_files}), truncating") + self.logger.warning( + f"Reached max files limit ({self.max_files}), truncating" + ) break - + return filtered_files - + def _is_ignored_path(self, file_path: Path, repo_path: Path) -> bool: """Check if file path should be ignored.""" relative_path = file_path.relative_to(repo_path) path_str = str(relative_path).lower() - + # Ignored directories ignored_dirs = { - 'node_modules', '.git', '.svn', '.hg', '__pycache__', - '.pytest_cache', '.mypy_cache', '.tox', 'venv', 'env', - 'build', 'dist', 'target', 'bin', 'obj', '.idea', '.vscode', - 'coverage', '.coverage', '.nyc_output' + "node_modules", + ".git", + ".svn", + ".hg", + "__pycache__", + ".pytest_cache", + ".mypy_cache", + ".tox", + "venv", + "env", + "build", + "dist", + "target", + "bin", + "obj", + ".idea", + ".vscode", + "coverage", + ".coverage", + ".nyc_output", } - + # Check if any parent directory is ignored for part in relative_path.parts: if part.lower() in ignored_dirs: return True - + # Ignored file patterns ignored_patterns = [ - '.min.js', '.min.css', '.bundle.js', '.bundle.css', - 'package-lock.json', 'yarn.lock', 'composer.lock', - '.log', '.tmp', '.temp', '.cache' + ".min.js", + ".min.css", + ".bundle.js", + ".bundle.css", + "package-lock.json", + "yarn.lock", + "composer.lock", + ".log", + ".tmp", + ".temp", + ".cache", ] - + for pattern in ignored_patterns: if pattern in path_str: return True - + # Ignored extensions ignored_extensions = { - '.pyc', '.pyo', '.class', '.o', '.obj', '.exe', '.dll', - '.so', '.dylib', '.bin', '.jar', '.war', '.tar', '.gz', - '.zip', '.rar', '.7z', '.pdf', '.doc', '.docx', '.xls', - '.xlsx', '.ppt', '.pptx', '.png', '.jpg', '.jpeg', '.gif', - '.bmp', '.ico', '.svg', '.mp4', '.avi', '.mov', '.mp3', - '.wav', '.flac' + ".pyc", + ".pyo", + ".class", + ".o", + ".obj", + ".exe", + ".dll", + ".so", + ".dylib", + ".bin", + ".jar", + ".war", + ".tar", + ".gz", + ".zip", + ".rar", + ".7z", + ".pdf", + ".doc", + ".docx", + ".xls", + ".xlsx", + ".ppt", + ".pptx", + ".png", + ".jpg", + ".jpeg", + ".gif", + ".bmp", + ".ico", + ".svg", + ".mp4", + ".avi", + ".mov", + ".mp3", + ".wav", + 
".flac", } - + if file_path.suffix.lower() in ignored_extensions: return True - + return False - + def _get_default_branch(self, repo: Repo) -> str: """ Detect the actual default branch of the repository. - + Args: repo: GitPython Repo object - + Returns: Name of the default branch (main, master, develop, etc.) """ @@ -485,47 +583,59 @@ def _get_default_branch(self, repo: Repo) -> str: # Method 1: Check if HEAD points to a specific branch try: default_branch = repo.active_branch.name - self.logger.debug(f"Default branch detected via active_branch: {default_branch}") + self.logger.debug( + f"Default branch detected via active_branch: {default_branch}" + ) return default_branch except Exception: pass - + # Method 2: Check remote refs for common default branches - remote_refs = [ref.name for ref in repo.refs if ref.name.startswith('refs/remotes/origin/')] - + remote_refs = [ + ref.name + for ref in repo.refs + if ref.name.startswith("refs/remotes/origin/") + ] + # Priority order for common default branches - for candidate in ['main', 'master', 'develop', 'dev']: - candidate_ref = f'refs/remotes/origin/{candidate}' + for candidate in ["main", "master", "develop", "dev"]: + candidate_ref = f"refs/remotes/origin/{candidate}" if candidate_ref in remote_refs: - self.logger.debug(f"Default branch detected via remote refs: {candidate}") + self.logger.debug( + f"Default branch detected via remote refs: {candidate}" + ) return candidate - + # Method 3: Try to get the default from git symbolic-ref try: # This should give us something like 'refs/heads/main' - symbolic_ref = repo.git.symbolic_ref('HEAD') - if symbolic_ref.startswith('refs/heads/'): - branch_name = symbolic_ref.replace('refs/heads/', '') - self.logger.debug(f"Default branch detected via symbolic-ref: {branch_name}") + symbolic_ref = repo.git.symbolic_ref("HEAD") + if symbolic_ref.startswith("refs/heads/"): + branch_name = symbolic_ref.replace("refs/heads/", "") + self.logger.debug( + f"Default branch detected via symbolic-ref: {branch_name}" + ) return branch_name except Exception: pass - + # Method 4: Use the first remote branch as fallback if remote_refs: # Get the branch name from the first remote ref first_ref = remote_refs[0] - if first_ref.startswith('refs/remotes/origin/'): - fallback_branch = first_ref.replace('refs/remotes/origin/', '') + if first_ref.startswith("refs/remotes/origin/"): + fallback_branch = first_ref.replace("refs/remotes/origin/", "") # Skip PR refs - if not fallback_branch.startswith('pr/'): - self.logger.debug(f"Default branch fallback to first remote: {fallback_branch}") + if not fallback_branch.startswith("pr/"): + self.logger.debug( + f"Default branch fallback to first remote: {fallback_branch}" + ) return fallback_branch - + # Final fallback - assume 'main' (modern default) self.logger.warning("Could not detect default branch, assuming 'main'") return "main" - + except Exception as e: self.logger.warning(f"Error detecting default branch: {e}, assuming 'main'") return "main" @@ -533,20 +643,22 @@ def _get_default_branch(self, repo: Repo) -> str: def _is_binary_file(self, file_path: Path) -> bool: """Check if file is binary.""" try: - with open(file_path, 'rb') as f: + with open(file_path, "rb") as f: chunk = f.read(1024) - + # Check for null bytes (common in binary files) - if b'\x00' in chunk: + if b"\x00" in chunk: return True - + # Check text/binary ratio - text_chars = sum(1 for byte in chunk if 32 <= byte <= 126 or byte in (9, 10, 13)) + text_chars = sum( + 1 for byte in chunk if 32 <= byte <= 126 or 
byte in (9, 10, 13) + ) if len(chunk) > 0 and text_chars / len(chunk) < 0.95: return True - + except Exception: # If we can't read the file, consider it binary return True - + return False diff --git a/app/services/prompt_service.py b/app/services/prompt_service.py index 0614d14..2f8a84c 100644 --- a/app/services/prompt_service.py +++ b/app/services/prompt_service.py @@ -2,8 +2,7 @@ import hashlib import logging -from pathlib import Path -from typing import Dict, List, Optional + import openai from app.config import settings @@ -14,93 +13,103 @@ class PromptGenerationError(Exception): """Error during prompt generation.""" - pass class GPTPromptService: """Service for generating human-like prompts using GPT.""" - + def __init__(self): self.logger = logging.getLogger("services.gpt_prompt") - + # Initialize OpenAI client if not settings.openai_api_key: raise PromptGenerationError( "OpenAI API key is required for GPT prompt generation. " "Please set OPENAI_API_KEY in your environment." ) - + self.client = openai.OpenAI(api_key=settings.openai_api_key) - self.logger.info(f"Initialized GPT prompt service with model: {settings.prompt_model}") - + self.logger.info( + f"Initialized GPT prompt service with model: {settings.prompt_model}" + ) + def generate_prompts( self, pr_result: PRAnalysisResult, max_files: int, - template_names: Optional[List[str]] = None - ) -> Dict[str, str]: + template_names: list[str] | None = None, + ) -> dict[str, str]: """ Generate all prompts for a PR analysis result using GPT. - + Args: pr_result: PR analysis result with repo info and changed files max_files: Maximum number of files to include (from UI, no hardcoding) template_names: Optional list of prompt types to generate - + Returns: Dictionary mapping prompt names to generated content - + Raises: PromptGenerationError: If prompt generation fails """ - + # Default prompt types if template_names is None: - template_names = ["precompression", "deepdive", "memory_only", "evaluator_set"] - + template_names = [ + "precompression", + "deepdive", + "memory_only", + "evaluator_set", + ] + self.logger.info( f"Generating GPT prompts for PR {pr_result.pr_number} " f"in {pr_result.repo_full_name}: {template_names} (max_files: {max_files})" ) - + try: # Build context for GPT prompt generation context = self._build_pr_context(pr_result, max_files) - + # Generate each prompt type using GPT prompts = {} for prompt_type in template_names: - prompt_content = self._generate_single_prompt_with_gpt(prompt_type, context) + prompt_content = self._generate_single_prompt_with_gpt( + prompt_type, context + ) prompts[prompt_type] = prompt_content - + self.logger.info(f"Successfully generated {len(prompts)} GPT prompts") - + return prompts - + except Exception as e: self.logger.error(f"Failed to generate GPT prompts: {e}") raise PromptGenerationError(f"GPT prompt generation failed: {e}") - - def get_prompt_hash(self, prompts: Dict[str, str]) -> str: + + def get_prompt_hash(self, prompts: dict[str, str]) -> str: """Generate hash for prompt reproducibility.""" - + # Create deterministic hash based on prompt content and settings - combined_content = f"model:{settings.prompt_model}|temp:{settings.prompt_temperature}|" + combined_content = ( + f"model:{settings.prompt_model}|temp:{settings.prompt_temperature}|" + ) for name in sorted(prompts.keys()): combined_content += f"{name}:{prompts[name]}\n" - + return hashlib.sha256(combined_content.encode()).hexdigest()[:16] - - def _build_pr_context(self, pr_result: PRAnalysisResult, max_files: int) -> 
Dict: + + def _build_pr_context(self, pr_result: PRAnalysisResult, max_files: int) -> dict: """Build comprehensive context about the PR for GPT.""" - + # Build file contents with size limits (using max_files from UI) at_files_content = self._build_at_files_content(pr_result, max_files) - + # Create file list limited_files = pr_result.changed_files[:max_files] file_list = "\n".join([f"- {file_path}" for file_path in limited_files]) - + context = { # Repository information "repo_name": pr_result.repo_full_name, @@ -110,26 +119,23 @@ def _build_pr_context(self, pr_result: PRAnalysisResult, max_files: int) -> Dict "base_branch": pr_result.base_branch, "head_branch": pr_result.head_branch, "commit_sha": pr_result.commit_sha, - # File information (using max_files from UI) "changed_files": limited_files, "file_count": len(limited_files), "total_files": len(pr_result.changed_files), "file_list": file_list, "max_files": max_files, - # Content for analysis "at_files": at_files_content, - # Metadata - "truncated": len(pr_result.changed_files) > max_files + "truncated": len(pr_result.changed_files) > max_files, } - + return context - - def _generate_single_prompt_with_gpt(self, prompt_type: str, context: Dict) -> str: + + def _generate_single_prompt_with_gpt(self, prompt_type: str, context: dict) -> str: """Generate a single prompt using GPT based on the type and context.""" - + # Define prompt generation instructions for each type generation_instructions = { "precompression": { @@ -140,9 +146,9 @@ def _generate_single_prompt_with_gpt(self, prompt_type: str, context: Dict) -> s "No emojis, no formal sections", "Just explain what you need them to check", "Be specific about the files but keep it natural", - "Tell them not to make stuff up about code they don't see" + "Tell them not to make stuff up about code they don't see", ], - "focus": "Quick, natural request to analyze the PR" + "focus": "Quick, natural request to analyze the PR", }, "deepdive": { "role": "Ask someone to dig deeper into the technical details", @@ -152,9 +158,9 @@ def _generate_single_prompt_with_gpt(self, prompt_type: str, context: Dict) -> s "Short sentences, direct questions", "No fancy formatting or structure", "Just say what technical aspects to check", - "Keep it conversational" + "Keep it conversational", ], - "focus": "Getting them to look at implementation details" + "focus": "Getting them to look at implementation details", }, "memory_only": { "role": "Tell someone to recall what they remember without looking back", @@ -164,9 +170,9 @@ def _generate_single_prompt_with_gpt(self, prompt_type: str, context: Dict) -> s "Tell them it's okay to say they don't remember", "Short, direct instructions", "No formal structure", - "Like asking a friend to recall something" + "Like asking a friend to recall something", ], - "focus": "Testing what they remember from before" + "focus": "Testing what they remember from before", }, "evaluator_set": { "role": "Ask specific questions to test their memory and understanding", @@ -176,29 +182,31 @@ def _generate_single_prompt_with_gpt(self, prompt_type: str, context: Dict) -> s "No fancy formatting", "Just ask what you want to know", "Tell them to be honest if they don't know", - "Casual but specific" + "Casual but specific", ], - "focus": "Direct questions about what they learned" - } + "focus": "Direct questions about what they learned", + }, } - - instruction = generation_instructions.get(prompt_type, generation_instructions["precompression"]) - + + instruction = generation_instructions.get( 
+ prompt_type, generation_instructions["precompression"] + ) + # Create GPT prompt for prompt generation gpt_prompt = f"""Write a casual message asking someone to analyze this PR. Write like you're messaging a colleague on Slack. -PR: {context['repo_name']} #{context['pr_number']} -Files changed: {context['file_count']} -{context['file_list']} +PR: {context["repo_name"]} #{context["pr_number"]} +Files changed: {context["file_count"]} +{context["file_list"]} -What you need to do: {instruction['role']} +What you need to do: {instruction["role"]} Keep it natural and conversational. Short sentences. No emojis. No fancy formatting. Just tell them what to look at and remind them to only talk about code they actually see. -This is for {prompt_type} - {instruction['focus']} +This is for {prompt_type} - {instruction["focus"]} Requirements: -{chr(10).join(f"- {req}" for req in instruction['requirements'])} +{chr(10).join(f"- {req}" for req in instruction["requirements"])} Use the actual repo name and PR number above. Don't use template variables. Write it like you're talking directly to them.""" @@ -210,53 +218,59 @@ def _generate_single_prompt_with_gpt(self, prompt_type: str, context: Dict) -> s messages=[ { "role": "system", - "content": "You are a casual developer writing quick messages to a colleague. Write naturally like you're on Slack - short sentences, direct, no fluff. Skip emojis and formal structure. Sound human, not AI." + "content": "You are a casual developer writing quick messages to a colleague. Write naturally like you're on Slack - short sentences, direct, no fluff. Skip emojis and formal structure. Sound human, not AI.", }, - {"role": "user", "content": gpt_prompt} + {"role": "user", "content": gpt_prompt}, ], temperature=settings.prompt_temperature, - max_completion_tokens=settings.prompt_max_tokens + max_completion_tokens=settings.prompt_max_tokens, ) except Exception as e: if "temperature" in str(e).lower(): - self.logger.warning(f"Model {settings.prompt_model} doesn't support temperature {settings.prompt_temperature}, using default") + self.logger.warning( + f"Model {settings.prompt_model} doesn't support temperature {settings.prompt_temperature}, using default" + ) response = self.client.chat.completions.create( model=settings.prompt_model, messages=[ { "role": "system", - "content": "You are a casual developer writing quick messages to a colleague. Write naturally like you're on Slack - short sentences, direct, no fluff. Skip emojis and formal structure. Sound human, not AI." + "content": "You are a casual developer writing quick messages to a colleague. Write naturally like you're on Slack - short sentences, direct, no fluff. Skip emojis and formal structure. 
Sound human, not AI.", }, - {"role": "user", "content": gpt_prompt} + {"role": "user", "content": gpt_prompt}, ], - max_completion_tokens=settings.prompt_max_tokens + max_completion_tokens=settings.prompt_max_tokens, ) else: raise - + generated_prompt = response.choices[0].message.content.strip() - + # Replace template variables with actual values filled_prompt = self._fill_template_variables(generated_prompt, context) - + # Log if there are still unfilled templates - if '{{' in filled_prompt: - self.logger.warning(f"Unfilled templates detected in {prompt_type} prompt") + if "{{" in filled_prompt: + self.logger.warning( + f"Unfilled templates detected in {prompt_type} prompt" + ) self.logger.warning(f"Sample: {filled_prompt[:300]}") - - self.logger.info(f"Generated {prompt_type} prompt: {len(filled_prompt)} characters") - + + self.logger.info( + f"Generated {prompt_type} prompt: {len(filled_prompt)} characters" + ) + return filled_prompt - + except Exception as e: self.logger.error(f"GPT API call failed for {prompt_type}: {e}") # Fallback to a basic template if GPT fails fallback = self._get_fallback_prompt(prompt_type, context) return self._fill_template_variables(fallback, context) - - def _fill_template_variables(self, prompt: str, context: Dict) -> str: + + def _fill_template_variables(self, prompt: str, context: dict) -> str: """Replace template variables in the prompt with actual values.""" - + # Define all possible template variables and their replacements replacements = { "{{ repo_name }}": context.get("repo_name", ""), @@ -274,203 +288,207 @@ def _fill_template_variables(self, prompt: str, context: Dict) -> str: "{{ head_branch }}": context.get("head_branch", ""), "{{head_branch}}": context.get("head_branch", ""), } - + filled_prompt = prompt for template_var, value in replacements.items(): filled_prompt = filled_prompt.replace(template_var, str(value)) - + return filled_prompt - - def _build_at_files_content(self, pr_result: PRAnalysisResult, max_files: int) -> str: + + def _build_at_files_content( + self, pr_result: PRAnalysisResult, max_files: int + ) -> str: """Build the file contents section with EXTREME memory safety to prevent segfaults.""" - + try: - from app.services.pr_service import PRService import gc - import signal - + + from app.services.pr_service import PRService + # CRITICAL: Ultra-conservative limits to prevent crashes - ABSOLUTE_MAX_TOTAL = 15000 # Drastically reduced total size - ABSOLUTE_MAX_FILE = 1000 # Much smaller per-file limit - ABSOLUTE_MAX_FILES = 10 # Severely limit file count - MAX_FILE_SIZE_BYTES = 10240 # 10KB max file size on disk - + ABSOLUTE_MAX_TOTAL = 15000 # Drastically reduced total size + ABSOLUTE_MAX_FILE = 1000 # Much smaller per-file limit + ABSOLUTE_MAX_FILES = 10 # Severely limit file count + MAX_FILE_SIZE_BYTES = 10240 # 10KB max file size on disk + pr_service = PRService() at_files_parts = [] total_chars = 0 - + # Ultra-conservative file count safe_max_files = min(max_files, ABSOLUTE_MAX_FILES) files_to_process = pr_result.changed_files[:safe_max_files] - - self.logger.info(f"SAFE MODE: Processing only {len(files_to_process)} files with ultra-conservative limits") - + + self.logger.info( + f"SAFE MODE: Processing only {len(files_to_process)} files with ultra-conservative limits" + ) + # Process files with extreme caution processed_count = 0 for i, file_path in enumerate(files_to_process): try: # CRITICAL: Check current memory usage - if len('\n'.join(at_files_parts)) > ABSOLUTE_MAX_TOTAL // 2: + if len("\n".join(at_files_parts)) > 
ABSOLUTE_MAX_TOTAL // 2: at_files_parts.append("... [STOPPED: Memory limit approached]") break - + # CRITICAL: File size check BEFORE reading full_path = pr_result.repo_path / file_path if not full_path.exists() or not full_path.is_file(): continue - + file_size = full_path.stat().st_size if file_size > MAX_FILE_SIZE_BYTES: - at_files_parts.extend([ - f"📄 **{file_path}** (size: {file_size:,} bytes - SKIPPED)", - "File too large for safe processing", - "" - ]) + at_files_parts.extend( + [ + f"📄 **{file_path}** (size: {file_size:,} bytes - SKIPPED)", + "File too large for safe processing", + "", + ] + ) continue - + # CRITICAL: Read file with extreme safety try: - with open(full_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(full_path, encoding="utf-8", errors="ignore") as f: # Read only small chunks to prevent memory explosion content = f.read(ABSOLUTE_MAX_FILE) - + # CRITICAL: Aggressive truncation if len(content) > ABSOLUTE_MAX_FILE: content = content[:ABSOLUTE_MAX_FILE] + "\n... [TRUNCATED]" - - # CRITICAL: Safe content filtering - content = self._sanitize_content(content, ABSOLUTE_MAX_FILE // 2) - + + # CRITICAL: Safe content filtering + content = self._sanitize_content( + content, ABSOLUTE_MAX_FILE // 2 + ) + except Exception as read_error: - at_files_parts.extend([ - f"📄 **{file_path}** - READ ERROR", - f"Could not read: {str(read_error)[:50]}", - "" - ]) + at_files_parts.extend( + [ + f"📄 **{file_path}** - READ ERROR", + f"Could not read: {str(read_error)[:50]}", + "", + ] + ) continue - + # CRITICAL: Check total size before adding new_section = f"📄 **{file_path}**\n{content}\n" if total_chars + len(new_section) > ABSOLUTE_MAX_TOTAL: at_files_parts.append("... [STOPPED: Total size limit reached]") break - + # Safe to add - at_files_parts.extend([ - f"📄 **{file_path}**", - content, - "" - ]) - + at_files_parts.extend([f"📄 **{file_path}**", content, ""]) + total_chars += len(new_section) processed_count += 1 - + # CRITICAL: Force garbage collection every few files if processed_count % 3 == 0: gc.collect() - + # CRITICAL: Emergency brake if processed_count >= 5: # Process max 5 files at_files_parts.append("... [STOPPED: Safety limit reached]") break - + except Exception as file_error: self.logger.warning(f"Error processing {file_path}: {file_error}") - at_files_parts.extend([ - f"📄 **{file_path}** - ERROR", - "Processing failed", - "" - ]) + at_files_parts.extend( + [f"📄 **{file_path}** - ERROR", "Processing failed", ""] + ) continue - + # CRITICAL: Build result with final safety checks result_parts = at_files_parts[:50] # Limit number of parts result = "\n".join(result_parts) - + # CRITICAL: Final truncation if needed if len(result) > ABSOLUTE_MAX_TOTAL: result = result[:ABSOLUTE_MAX_TOTAL] + "\n... 
[FINAL TRUNCATION]" - + # Force garbage collection before return gc.collect() - - self.logger.info(f"SAFE MODE: Generated {len(result)} chars from {processed_count} files") + + self.logger.info( + f"SAFE MODE: Generated {len(result)} chars from {processed_count} files" + ) return result - + except Exception as e: self.logger.error(f"CRITICAL ERROR in file processing: {e}") # CRITICAL: Return absolute minimal content to prevent crash try: - file_list = ', '.join(pr_result.changed_files[:3]) + file_list = ", ".join(pr_result.changed_files[:3]) return f"Files: {file_list}\n\nERROR: {str(e)[:100]}" except: return "ERROR: Unable to process files safely" - + def _sanitize_content(self, content: str, max_length: int) -> str: """Sanitize content to prevent memory issues.""" if not content: return "" - + try: # Remove very long lines that might cause issues - lines = content.split('\n') + lines = content.split("\n") safe_lines = [] - + for line in lines[:50]: # Max 50 lines if len(line) > 200: # Truncate long lines line = line[:200] + "..." safe_lines.append(line) - + # Stop if we're getting too much content - if len('\n'.join(safe_lines)) > max_length: + if len("\n".join(safe_lines)) > max_length: break - - result = '\n'.join(safe_lines) - + + result = "\n".join(safe_lines) + # Final truncation if len(result) > max_length: result = result[:max_length] + "..." - + return result - + except Exception: return "Content could not be processed safely" - - def _get_fallback_prompt(self, prompt_type: str, context: Dict) -> str: + + def _get_fallback_prompt(self, prompt_type: str, context: dict) -> str: """Provide fallback prompts if GPT generation fails.""" - + fallbacks = { - "precompression": f"""# 🔍 Pull Request Analysis: {{{{ repo_name }}}} #{{{{ pr_number }}}} + "precompression": """# 🔍 Pull Request Analysis: {{ repo_name }} #{{ pr_number }} -I need you to analyze this GitHub Pull Request that modifies {{{{ file_count }}}} files. Please provide a thorough, fact-based analysis. +I need you to analyze this GitHub Pull Request that modifies {{ file_count }} files. Please provide a thorough, fact-based analysis. ## 📋 PR Details -- **Repository**: {{{{ repo_name }}}} -- **PR Number**: #{{{{ pr_number }}}} -- **Files Changed**: {{{{ file_count }}}} +- **Repository**: {{ repo_name }} +- **PR Number**: #{{ pr_number }} +- **Files Changed**: {{ file_count }} ## 📁 Files Modified -{{{{ file_list }}}} +{{ file_list }} ## 📄 File Contents -{{{{ at_files }}}} +{{ at_files }} **⚠️ IMPORTANT:** Base your analysis only on the actual code shown above. Do not make assumptions about code you cannot see. Please analyze the purpose, implementation, and impact of these changes.""", - "deepdive": """# 🧠 Technical Deep Dive Now let's examine the technical implementation details more closely. Please analyze: 1. **Implementation patterns** you observe in the code -2. **Technical approaches** used +2. **Technical approaches** used 3. **Error handling** mechanisms present 4. **Integration points** with existing code Focus on what you can specifically observe in the provided code.""", - "memory_only": """# 🧠 Memory-Only Analysis **⚠️ CRITICAL:** You are now in memory-only mode. Do NOT reference the original code. @@ -481,7 +499,6 @@ def _get_fallback_prompt(self, prompt_type: str, context: Dict) -> str: 3. What implementation details do you remember? 
If you don't remember something clearly, say so rather than guessing.""", - "evaluator_set": """# 📊 Evaluation Questions Answer based only on your memory from previous analysis: @@ -502,24 +519,25 @@ def _get_fallback_prompt(self, prompt_type: str, context: Dict) -> str: 7. What would need to be reverted if this PR was rolled back? 8. Which parts are core vs peripheral? -Be specific and honest about what you remember.""" +Be specific and honest about what you remember.""", } - + return fallbacks.get(prompt_type, fallbacks["precompression"]) def get_prompt_service() -> GPTPromptService: """Get global GPT prompt service instance.""" global _gpt_prompt_service - - if '_gpt_prompt_service' not in globals(): + + if "_gpt_prompt_service" not in globals(): if settings.use_gpt_prompts: _gpt_prompt_service = GPTPromptService() else: # Fallback to template-based service if GPT is disabled from app.services.template_prompt_service import TemplatePromptService + _gpt_prompt_service = TemplatePromptService() - + return _gpt_prompt_service diff --git a/app/services/task_logger.py b/app/services/task_logger.py index 4b1f4fc..2ccbc76 100644 --- a/app/services/task_logger.py +++ b/app/services/task_logger.py @@ -5,7 +5,7 @@ import time from datetime import datetime from pathlib import Path -from typing import Dict, Any, Optional +from typing import Any from uuid import UUID from app.config import settings @@ -13,16 +13,16 @@ class TaskLogger: """Structured logger for task execution with real-time streaming support.""" - - def __init__(self, task_id: UUID, agent_name: Optional[str] = None): + + def __init__(self, task_id: UUID, agent_name: str | None = None): self.task_id = str(task_id) self.agent_name = agent_name self.task_dir = Path(settings.run_root) / self.task_id self.task_dir.mkdir(parents=True, exist_ok=True) - + # Create log files self.main_log_file = self.task_dir / "task.log" - + if agent_name: self.agent_dir = self.task_dir / "agents" / agent_name self.agent_dir.mkdir(parents=True, exist_ok=True) @@ -31,26 +31,32 @@ def __init__(self, task_id: UUID, agent_name: Optional[str] = None): else: self.agent_log_file = None self.transcript_file = None - + # Setup logger self.logger = logging.getLogger(f"task.{self.task_id}") self.logger.setLevel(logging.INFO) - + # Remove existing handlers to prevent duplicates self.logger.handlers.clear() - + # Add file handler for main task log - main_handler = logging.FileHandler(self.main_log_file, encoding='utf-8') - main_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) + main_handler = logging.FileHandler(self.main_log_file, encoding="utf-8") + main_handler.setFormatter( + logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") + ) self.logger.addHandler(main_handler) - + # Add agent-specific handler if needed if self.agent_log_file: - agent_handler = logging.FileHandler(self.agent_log_file, encoding='utf-8') - agent_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) + agent_handler = logging.FileHandler(self.agent_log_file, encoding="utf-8") + agent_handler.setFormatter( + logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") + ) self.logger.addHandler(agent_handler) - - def log_structured(self, event_type: str, data: Dict[str, Any], level: str = "INFO"): + + def log_structured( + self, event_type: str, data: dict[str, Any], level: str = "INFO" + ): """Log structured JSON data.""" log_entry = { "timestamp": datetime.utcnow().isoformat(), @@ -58,174 +64,235 @@ def 
log_structured(self, event_type: str, data: Dict[str, Any], level: str = "IN "agent": self.agent_name, "type": event_type, "level": level, - **data + **data, } - + # Write to appropriate log files log_line = json.dumps(log_entry, ensure_ascii=False) - - with open(self.main_log_file, 'a', encoding='utf-8') as f: - f.write(log_line + '\n') + + with open(self.main_log_file, "a", encoding="utf-8") as f: + f.write(log_line + "\n") f.flush() - + if self.agent_log_file and self.agent_name: - with open(self.agent_log_file, 'a', encoding='utf-8') as f: - f.write(log_line + '\n') + with open(self.agent_log_file, "a", encoding="utf-8") as f: + f.write(log_line + "\n") f.flush() - + async def log_task_event(self, task_id: str, event_type: str, message: str): """Generic async task event logging for worker compatibility.""" - self.log_structured(event_type.lower(), { - "message": message - }) - + self.log_structured(event_type.lower(), {"message": message}) + def log_task_started(self, pr_url: str, agents: list, rubric: list): """Log task initiation.""" - self.log_structured("task_started", { - "pr_url": pr_url, - "agents": agents, - "rubric": rubric, - "message": f"Task started for PR: {pr_url}" - }) - + self.log_structured( + "task_started", + { + "pr_url": pr_url, + "agents": agents, + "rubric": rubric, + "message": f"Task started for PR: {pr_url}", + }, + ) + def log_pr_cloned(self, repo_path: str, changed_files: list): """Log PR clone completion.""" - self.log_structured("pr_cloned", { - "repo_path": repo_path, - "changed_files": changed_files, - "file_count": len(changed_files), - "message": f"PR cloned successfully with {len(changed_files)} changed files" - }) - - def log_prompt_generated(self, prompt_type: str, prompt_content: str, file_list: list): + self.log_structured( + "pr_cloned", + { + "repo_path": repo_path, + "changed_files": changed_files, + "file_count": len(changed_files), + "message": f"PR cloned successfully with {len(changed_files)} changed files", + }, + ) + + def log_prompt_generated( + self, prompt_type: str, prompt_content: str, file_list: list + ): """Log LLM prompt generation.""" - self.log_structured("prompt_generated", { - "prompt_type": prompt_type, - "prompt_content": prompt_content[:500] + "..." if len(prompt_content) > 500 else prompt_content, - "prompt_length": len(prompt_content), - "files_included": file_list, - "message": f"Generated {prompt_type} prompt with {len(file_list)} files" - }) - + self.log_structured( + "prompt_generated", + { + "prompt_type": prompt_type, + "prompt_content": prompt_content[:500] + "..." 
+ if len(prompt_content) > 500 + else prompt_content, + "prompt_length": len(prompt_content), + "files_included": file_list, + "message": f"Generated {prompt_type} prompt with {len(file_list)} files", + }, + ) + def log_agent_started(self, agent_name: str, command: str): """Log agent session start.""" - self.log_structured("agent_started", { - "agent": agent_name, - "command": command, - "message": f"Started {agent_name} agent session" - }) - - def log_agent_interaction(self, agent_name: str, interaction_type: str, content: str, context: dict = None): + self.log_structured( + "agent_started", + { + "agent": agent_name, + "command": command, + "message": f"Started {agent_name} agent session", + }, + ) + + def log_agent_interaction( + self, agent_name: str, interaction_type: str, content: str, context: dict = None + ): """Log agent interactions (prompts sent, responses received, etc.).""" - self.log_structured("agent_interaction", { - "agent": agent_name, - "interaction_type": interaction_type, # 'prompt_sent', 'response_received', 'command_executed' - "content": content[:1000] + "..." if len(content) > 1000 else content, - "content_length": len(content), - "context": context or {}, - "message": f"Agent {agent_name}: {interaction_type}" - }) - + self.log_structured( + "agent_interaction", + { + "agent": agent_name, + "interaction_type": interaction_type, # 'prompt_sent', 'response_received', 'command_executed' + "content": content[:1000] + "..." if len(content) > 1000 else content, + "content_length": len(content), + "context": context or {}, + "message": f"Agent {agent_name}: {interaction_type}", + }, + ) + # Also write to transcript file for raw output if self.transcript_file: timestamp = datetime.utcnow().isoformat() - with open(self.transcript_file, 'a', encoding='utf-8') as f: + with open(self.transcript_file, "a", encoding="utf-8") as f: f.write(f"\n[{timestamp}] {interaction_type.upper()}: {agent_name}\n") - f.write(content + '\n') - f.write("-" * 80 + '\n') + f.write(content + "\n") + f.write("-" * 80 + "\n") f.flush() - - def log_compression_detected(self, agent_name: str, before_context: str, after_context: str, method: str): + + def log_compression_detected( + self, agent_name: str, before_context: str, after_context: str, method: str + ): """Log memory compression detection.""" - self.log_structured("compression_detected", { - "agent": agent_name, - "detection_method": method, # 'context_jump', 'percentage_threshold', 'heuristic' - "before_context": before_context, - "after_context": after_context, - "message": f"Memory compression detected for {agent_name} via {method}" - }) - + self.log_structured( + "compression_detected", + { + "agent": agent_name, + "detection_method": method, # 'context_jump', 'percentage_threshold', 'heuristic' + "before_context": before_context, + "after_context": after_context, + "message": f"Memory compression detected for {agent_name} via {method}", + }, + ) + def log_memory_only_started(self, agent_name: str, evaluator_questions: list): """Log start of memory-only evaluation phase.""" - self.log_structured("memory_only_started", { - "agent": agent_name, - "evaluator_questions": evaluator_questions, - "question_count": len(evaluator_questions), - "message": f"Started memory-only evaluation for {agent_name} with {len(evaluator_questions)} questions" - }) - - def log_evaluation_qa(self, agent_name: str, question: str, answer: str, question_index: int): + self.log_structured( + "memory_only_started", + { + "agent": agent_name, + "evaluator_questions": 
evaluator_questions, + "question_count": len(evaluator_questions), + "message": f"Started memory-only evaluation for {agent_name} with {len(evaluator_questions)} questions", + }, + ) + + def log_evaluation_qa( + self, agent_name: str, question: str, answer: str, question_index: int + ): """Log evaluation question and answer pairs.""" - self.log_structured("evaluation_qa", { - "agent": agent_name, - "question_index": question_index, - "question": question, - "answer": answer[:500] + "..." if len(answer) > 500 else answer, - "answer_length": len(answer), - "message": f"Q&A {question_index + 1} completed for {agent_name}" - }) - - def log_agent_completed(self, agent_name: str, status: str, artifacts: dict, duration: float): + self.log_structured( + "evaluation_qa", + { + "agent": agent_name, + "question_index": question_index, + "question": question, + "answer": answer[:500] + "..." if len(answer) > 500 else answer, + "answer_length": len(answer), + "message": f"Q&A {question_index + 1} completed for {agent_name}", + }, + ) + + def log_agent_completed( + self, agent_name: str, status: str, artifacts: dict, duration: float + ): """Log agent session completion.""" - self.log_structured("agent_completed", { - "agent": agent_name, - "status": status, - "artifacts": artifacts, - "duration_seconds": duration, - "message": f"Agent {agent_name} completed with status: {status}" - }) - + self.log_structured( + "agent_completed", + { + "agent": agent_name, + "status": status, + "artifacts": artifacts, + "duration_seconds": duration, + "message": f"Agent {agent_name} completed with status: {status}", + }, + ) + def log_judge_started(self, judge_type: str, agent_results: dict): """Log judge evaluation start.""" - self.log_structured("judge_started", { - "judge_type": judge_type, - "agents_evaluated": list(agent_results.keys()), - "message": f"Started {judge_type} judge evaluation" - }) - - def log_judge_scoring(self, agent_name: str, dimension: str, score: float, rationale: str): + self.log_structured( + "judge_started", + { + "judge_type": judge_type, + "agents_evaluated": list(agent_results.keys()), + "message": f"Started {judge_type} judge evaluation", + }, + ) + + def log_judge_scoring( + self, agent_name: str, dimension: str, score: float, rationale: str + ): """Log individual scoring decisions.""" - self.log_structured("judge_scoring", { - "agent": agent_name, - "dimension": dimension, - "score": score, - "rationale": rationale[:300] + "..." if len(rationale) > 300 else rationale, - "message": f"Scored {agent_name} on {dimension}: {score}" - }) - + self.log_structured( + "judge_scoring", + { + "agent": agent_name, + "dimension": dimension, + "score": score, + "rationale": rationale[:300] + "..." 
+ if len(rationale) > 300 + else rationale, + "message": f"Scored {agent_name} on {dimension}: {score}", + }, + ) + def log_task_completed(self, status: str, final_scores: dict, duration: float): """Log task completion.""" - self.log_structured("task_completed", { - "status": status, - "final_scores": final_scores, - "duration_seconds": duration, - "message": f"Task completed with status: {status}" - }) - - def log_error(self, error_type: str, error_message: str, context: dict = None, exception: Exception = None): + self.log_structured( + "task_completed", + { + "status": status, + "final_scores": final_scores, + "duration_seconds": duration, + "message": f"Task completed with status: {status}", + }, + ) + + def log_error( + self, + error_type: str, + error_message: str, + context: dict = None, + exception: Exception = None, + ): """Log errors with context.""" error_data = { "error_type": error_type, "error_message": error_message, "context": context or {}, } - + if exception: error_data["exception_type"] = type(exception).__name__ error_data["exception_details"] = str(exception) - + self.log_structured("error", error_data, level="ERROR") - - def log_progress_update(self, stage: str, progress_percent: int, current_step: str, details: dict = None): + + def log_progress_update( + self, stage: str, progress_percent: int, current_step: str, details: dict = None + ): """Log progress updates for UI consumption.""" - self.log_structured("progress_update", { - "stage": stage, # 'pr_clone', 'agent_run', 'evaluation', 'judging' - "progress_percent": progress_percent, - "current_step": current_step, - "details": details or {}, - "message": f"{stage}: {current_step} ({progress_percent}%)" - }) - + self.log_structured( + "progress_update", + { + "stage": stage, # 'pr_clone', 'agent_run', 'evaluation', 'judging' + "progress_percent": progress_percent, + "current_step": current_step, + "details": details or {}, + "message": f"{stage}: {current_step} ({progress_percent}%)", + }, + ) + def close(self): """Close logger and cleanup handlers.""" for handler in self.logger.handlers: @@ -233,69 +300,60 @@ def close(self): self.logger.removeHandler(handler) -def get_task_logger(task_id: UUID, agent_name: Optional[str] = None) -> TaskLogger: +def get_task_logger(task_id: UUID, agent_name: str | None = None) -> TaskLogger: """Factory function to create task loggers.""" return TaskLogger(task_id, agent_name) class AgentSessionLogger: """Specialized logger for agent sessions with transcript capture.""" - + def __init__(self, task_id: UUID, agent_name: str): self.task_logger = TaskLogger(task_id, agent_name) self.agent_name = agent_name self.session_start_time = time.time() - + # Log session start self.task_logger.log_agent_started(agent_name, f"{agent_name} CLI session") - + def log_prompt_sent(self, prompt: str, prompt_type: str = "user_input"): """Log prompts sent to the agent.""" self.task_logger.log_agent_interaction( - self.agent_name, - "prompt_sent", - prompt, - {"prompt_type": prompt_type} + self.agent_name, "prompt_sent", prompt, {"prompt_type": prompt_type} ) - + def log_agent_response(self, response: str, response_type: str = "output"): """Log responses received from the agent.""" self.task_logger.log_agent_interaction( self.agent_name, - "response_received", + "response_received", response, - {"response_type": response_type} + {"response_type": response_type}, ) - + def log_command_executed(self, command: str, result: str = None): """Log commands executed within the agent session.""" context = 
{"command": command} if result: context["result"] = result[:200] + "..." if len(result) > 200 else result - + self.task_logger.log_agent_interaction( - self.agent_name, - "command_executed", - command, - context + self.agent_name, "command_executed", command, context ) - + def log_context_stats(self, context_left: str, stats_output: str): """Log context statistics from agent.""" self.task_logger.log_agent_interaction( self.agent_name, "context_stats", stats_output, - {"context_left": context_left} + {"context_left": context_left}, ) - + def close_session(self, status: str = "completed", artifacts: dict = None): """Close the agent session.""" duration = time.time() - self.session_start_time self.task_logger.log_agent_completed( - self.agent_name, - status, - artifacts or {}, - duration + self.agent_name, status, artifacts or {}, duration ) self.task_logger.close() diff --git a/prompts/evaluator_set.txt b/prompts/evaluator_set.txt index 51f30d6..66613cc 100644 --- a/prompts/evaluator_set.txt +++ b/prompts/evaluator_set.txt @@ -6,13 +6,13 @@ Excellent work on your memory-only analysis! Now it's time for the final evaluat --- -## 🎯 **AR: Accurate Retrieval** +## 🎯 **AR: Accurate Retrieval** *Testing your ability to accurately recall specific facts and details* ### Q1: **Main Purpose Identification** What was the primary purpose or goal of this Pull Request? Be as specific as you can based on what you remember. -### Q2: **File Inventory** +### Q2: **File Inventory** List the key files that were changed in this PR. For each file you remember, briefly describe its role or what changes were made to it. ### Q3: **Function/Method Details** @@ -40,7 +40,7 @@ Based on the implementation you analyzed, how would you go about testing these c ### Q7: **System Architecture** How do these changes fit into the overall system architecture? What role do they play in the bigger picture? -### Q8: **Ripple Effects** +### Q8: **Ripple Effects** What other parts of the codebase or system do you think might be affected by these changes? Why? ### Q9: **Long-term Implications** @@ -48,7 +48,7 @@ What are the long-term benefits or potential issues with the approach taken in t --- -## 🔄 **SF: Selective Forgetting** +## 🔄 **SF: Selective Forgetting** *Testing your ability to distinguish important vs. less important details* ### Q10: **Rollback Strategy** diff --git a/prompts/precompression.txt b/prompts/precompression.txt index b44f564..9daef73 100644 --- a/prompts/precompression.txt +++ b/prompts/precompression.txt @@ -4,7 +4,7 @@ Hello! You're about to analyze a GitHub Pull Request that modifies {{ file_count ## 📋 PR Details - **Repository**: {{ repo_name }} -- **PR Number**: #{{ pr_number }} +- **PR Number**: #{{ pr_number }} - **Base Branch**: `{{ base_branch }}` - **Head Branch**: `{{ head_branch }}` - **Files Changed**: {{ file_count }} @@ -31,7 +31,7 @@ I need you to carefully examine these code changes and provide a thorough, **fac - What are the key functions, classes, or components being modified? - Are there any new dependencies or architectural changes? -### 🔗 **Step 3: Impact Analysis** +### 🔗 **Step 3: Impact Analysis** - Which parts of the system will be affected? - Are there any potential breaking changes? - How do the changes in different files relate to each other? 
diff --git a/pyproject.toml b/pyproject.toml index 7d9ede8..27c9dbc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,7 +69,76 @@ disallow_untyped_defs = true [tool.ruff] target-version = "py311" line-length = 88 -select = ["E", "F", "UP", "B", "SIM", "I"] + +# Exclude files and directories +exclude = [ + ".git", + "__pycache__", + ".venv", + "venv", + "build", + "dist", + ".eggs", + "*.egg-info", + "alembic/versions", + "migrations", +] + +[tool.ruff.lint] +# Enable comprehensive linting rules +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "UP", # pyupgrade + "B", # flake8-bugbear + "SIM", # flake8-simplify + "I", # isort + "N", # pep8-naming + "C4", # flake8-comprehensions + "PIE", # flake8-pie + "PT", # flake8-pytest-style + "RET", # flake8-return + "RUF", # Ruff-specific rules + "T20", # flake8-print + "PLC", # pylint convention + "PLE", # pylint error + "PLR", # pylint refactor + "PLW", # pylint warning +] + +# Ignore specific rules +ignore = [ + "E501", # Line too long (handled by formatter) + "B008", # Do not perform function calls in argument defaults + "PLR0913", # Too many arguments + "PLR2004", # Magic value used in comparison + "RET504", # Unnecessary assignment before return + "T201", # Print statements (allowed in scripts) + "E402", # Module level import not at top of file (needed for some files) + "PLR0915", # Too many statements (acceptable for complex functions) + "PLR0912", # Too many branches (acceptable for complex logic) + "PLW0603", # Global statement (needed for singleton pattern) +] + +[tool.ruff.lint.isort] +known-first-party = ["app"] +force-single-line = false +force-sort-within-sections = false +split-on-trailing-comma = true + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.format] +# Use double quotes for strings +quote-style = "double" +# Indent with spaces +indent-style = "space" +# Respect magic trailing commas +skip-magic-trailing-comma = false +# Automatically detect line ending +line-ending = "auto" [tool.pytest.ini_options] asyncio_mode = "auto" diff --git a/quick-start.md b/quick-start.md index 51d41ef..8e7db59 100644 --- a/quick-start.md +++ b/quick-start.md @@ -39,7 +39,7 @@ open http://localhost:8000 1. **Open** http://localhost:8000 in your browser 2. **Paste** any GitHub PR URL (e.g., `https://github.com/owner/repo/pull/123`) 3. **Select** agents to test (iFlow, Claude, Gemini) -4. **Click** "Create Task" then "Start Task" +4. **Click** "Create Task" then "Start Task" 5. **Monitor** progress in real-time 6. **Download** ZIP bundle with all results when complete diff --git a/scripts/agent_entrypoint.sh b/scripts/agent_entrypoint.sh index 3e99b8a..5857e25 100644 --- a/scripts/agent_entrypoint.sh +++ b/scripts/agent_entrypoint.sh @@ -58,39 +58,39 @@ with socketserver.TCPServer(('', $AGENT_PORT), HealthHandler) as httpd: wait_for_task_data() { local timeout=300 # 5 minutes timeout local elapsed=0 - + log "Waiting for task data at $TASK_DATA_FILE" - + while [ ! -f "$TASK_DATA_FILE" ] && [ $elapsed -lt $timeout ]; do sleep 5 elapsed=$((elapsed + 5)) log "Still waiting for task data... (${elapsed}s elapsed)" done - + if [ ! -f "$TASK_DATA_FILE" ]; then error_exit "Task data file not found after ${timeout}s timeout" fi - + # Validate task data JSON if ! python3 -c "import json; json.load(open('$TASK_DATA_FILE'))" 2>/dev/null; then error_exit "Invalid JSON in task data file" fi - + log "Task data received and validated" } # Cleanup function cleanup() { log "Performing cleanup..." 
- + # Kill health server if running if [ -n "${HEALTH_PID:-}" ]; then kill $HEALTH_PID 2>/dev/null || true fi - + # Clean up temporary files rm -rf /tmp/* 2>/dev/null || true - + # Report container shutdown python3 -c " import json @@ -101,7 +101,7 @@ from datetime import datetime try: with open('$TASK_DATA_FILE', 'r') as f: task_data = json.load(f) - + task_id = task_data.get('task_id') if task_id: payload = { @@ -110,7 +110,7 @@ try: 'status': 'CONTAINER_SHUTDOWN', 'timestamp': datetime.utcnow().isoformat() } - + requests.post( f'$ORCHESTRATOR_URL/api/v1/tasks/{task_id}/status', json=payload, @@ -131,41 +131,41 @@ main() { log "Starting agent container: $AGENT_TYPE" log "Container configuration:" log " - Agent Type: $AGENT_TYPE" - log " - Agent Port: $AGENT_PORT" + log " - Agent Port: $AGENT_PORT" log " - Orchestrator: $ORCHESTRATOR_URL" log " - Max Memory: ${MAX_MEMORY:-3g}" log " - Max Execution Time: ${MAX_EXECUTION_TIME:-1800}s" - + # Verify agent type if [ "$AGENT_TYPE" = "unknown" ]; then error_exit "AGENT_TYPE environment variable not set" fi - + # Start health check server start_health_server - + # Wait for task data wait_for_task_data - + # Parse task data for logging TASK_ID=$(python3 -c "import json; data=json.load(open('$TASK_DATA_FILE')); print(data.get('task_id', 'unknown'))") PR_URL=$(python3 -c "import json; data=json.load(open('$TASK_DATA_FILE')); print(data.get('pr_url', 'unknown'))") - + log "Task Details:" log " - Task ID: $TASK_ID" log " - PR URL: $PR_URL" - + # Create necessary directories mkdir -p "/agent/workspace/$AGENT_TYPE" - mkdir -p "/agent/logs/$AGENT_TYPE" + mkdir -p "/agent/logs/$AGENT_TYPE" mkdir -p "/agent/results/$AGENT_TYPE" - + # Set up logging for agent execution exec 1> >(tee -a "/agent/logs/$AGENT_TYPE/container.log") exec 2> >(tee -a "/agent/logs/$AGENT_TYPE/container.log" >&2) - + log "Starting agent runner..." - + # Execute the agent runner with proper error handling if python3 agent_runner.py --agent="$AGENT_TYPE"; then log "Agent execution completed successfully" diff --git a/scripts/run.sh b/scripts/run.sh index 9f32c0e..c0b48cf 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -39,10 +39,10 @@ print_error() { kill_port_processes() { local port=$1 echo -e "${YELLOW}🔍 Checking for processes on port $port...${NC}" - + # Find processes using the port PIDS=$(lsof -ti:$port 2>/dev/null || true) - + if [ ! -z "$PIDS" ]; then echo -e "${YELLOW}🔪 Killing processes on port $port: $PIDS${NC}" echo $PIDS | xargs kill -9 2>/dev/null || true @@ -58,20 +58,20 @@ check_service() { local service_name=$2 local max_attempts=30 local attempt=1 - + echo -e "${YELLOW}🔍 Waiting for $service_name to be ready...${NC}" - + while [ $attempt -le $max_attempts ]; do if curl -s -f "$url" > /dev/null 2>&1; then print_status "$service_name is ready!" return 0 fi - + echo -n "." sleep 1 attempt=$((attempt + 1)) done - + print_error "$service_name failed to start after $max_attempts attempts" return 1 } @@ -79,40 +79,40 @@ check_service() { # Function to check prerequisites check_prerequisites() { echo -e "${BLUE}🔍 Checking prerequisites...${NC}" - + # Check Python if ! 
command -v python &> /dev/null; then print_error "Python not found" exit 1 fi print_status "Python available: $(python --version)" - + # Check Agent CLIs if command -v iflow &> /dev/null; then print_status "iFlow CLI available" else print_warning "iFlow CLI not found - iFlow agent will not work" fi - + if command -v claude &> /dev/null; then print_status "Claude CLI available" else print_warning "Claude CLI not found - Claude agent will not work" fi - + if command -v gemini &> /dev/null; then print_status "Gemini CLI available" else print_warning "Gemini CLI not found - Gemini agent will not work" fi - + # Check if .env exists if [ ! -f ".env" ]; then print_warning ".env file not found, using defaults" else print_status ".env file found" fi - + # Check PostgreSQL connection if python -c " import os @@ -133,7 +133,7 @@ except Exception as e: print_error "PostgreSQL connection failed" exit 1 fi - + # Check Redis connection if python -c " from app.config import settings @@ -156,21 +156,21 @@ except Exception as e: # Function to start API server start_api_server() { echo -e "${BLUE}🚀 Starting API server...${NC}" - + # Kill any existing processes on the API port kill_port_processes $API_PORT - + # Start the API server in the background nohup $PYTHON_CMD -m uvicorn app.main:app \ --host $API_HOST \ --port $API_PORT \ --log-level info \ > logs/api.log 2>&1 & - + API_PID=$! echo $API_PID > logs/api.pid print_status "API server started (PID: $API_PID)" - + # Check if API server is responding if check_service "http://$API_HOST:$API_PORT/health/" "API Server"; then print_status "API server health check passed" @@ -183,14 +183,14 @@ start_api_server() { # Function to start worker start_worker() { echo -e "${BLUE}👷 Starting worker process...${NC}" - + # Start the worker in the background nohup $PYTHON_CMD worker.py > logs/worker.log 2>&1 & - + WORKER_PID=$! 
echo $WORKER_PID > logs/worker.pid print_status "Worker started (PID: $WORKER_PID)" - + # Give worker time to initialize sleep 3 } @@ -198,14 +198,14 @@ start_worker() { # Function to verify agent dependencies verify_agent_dependencies() { echo -e "${BLUE}🔧 Verifying agent dependencies...${NC}" - + # Check Python packages if python -c "import openai; print('OpenAI package available')" 2>/dev/null; then print_status "OpenAI package available" else print_warning "OpenAI package not found - LLM judge may not work" fi - + if python -c "import anthropic; print('Anthropic package available')" 2>/dev/null; then print_status "Anthropic package available" else @@ -250,7 +250,7 @@ show_services() { # Cleanup function for graceful shutdown cleanup() { echo -e "\n${YELLOW}🛑 Shutting down services...${NC}" - + if [ -f logs/api.pid ]; then API_PID=$(cat logs/api.pid) if kill -0 $API_PID 2>/dev/null; then @@ -259,7 +259,7 @@ cleanup() { fi rm -f logs/api.pid fi - + if [ -f logs/worker.pid ]; then WORKER_PID=$(cat logs/worker.pid) if kill -0 $WORKER_PID 2>/dev/null; then @@ -268,7 +268,7 @@ cleanup() { fi rm -f logs/worker.pid fi - + echo -e "${GREEN}✅ Cleanup complete${NC}" exit 0 } @@ -282,14 +282,14 @@ main() { setup_logging check_prerequisites verify_agent_dependencies - + # Start services start_api_server start_worker - + # Show status show_services - + # Keep script running to handle signals echo -e "${YELLOW}Press Ctrl+C to stop all services${NC}" while true; do diff --git a/scripts/stop.sh b/scripts/stop.sh index ec17c7f..1db5095 100755 --- a/scripts/stop.sh +++ b/scripts/stop.sh @@ -34,26 +34,26 @@ print_error() { kill_service_by_pid() { local service_name=$1 local pid_file=$2 - + if [ -f "$pid_file" ]; then PID=$(cat "$pid_file") if kill -0 $PID 2>/dev/null; then echo -e "${YELLOW}🔪 Stopping $service_name (PID: $PID)...${NC}" kill $PID - + # Wait for graceful shutdown local count=0 while kill -0 $PID 2>/dev/null && [ $count -lt 10 ]; do sleep 1 count=$((count + 1)) done - + # Force kill if still running if kill -0 $PID 2>/dev/null; then print_warning "Force killing $service_name..." kill -9 $PID 2>/dev/null || true fi - + print_status "$service_name stopped" else print_warning "$service_name was not running" @@ -68,12 +68,12 @@ kill_service_by_pid() { kill_port_processes() { local port=$1 local service_name=$2 - + echo -e "${YELLOW}🔍 Checking for processes on port $port...${NC}" - + # Find processes using the port PIDS=$(lsof -ti:$port 2>/dev/null || true) - + if [ ! 
-z "$PIDS" ]; then echo -e "${YELLOW}🔪 Killing $service_name processes on port $port: $PIDS${NC}" echo $PIDS | xargs kill -9 2>/dev/null || true @@ -86,7 +86,7 @@ kill_port_processes() { # Function to cleanup temporary files cleanup_temp_files() { echo -e "${BLUE}🧹 Cleaning up temporary files...${NC}" - + # Clean up any temporary agent files if [ -d "storage" ]; then # Remove any incomplete task directories (optional) @@ -100,28 +100,28 @@ cleanup_temp_files() { # Main shutdown function main() { echo -e "${BLUE}🛑 Shutting down services...${NC}" - + # Stop services by PID files kill_service_by_pid "Worker" "logs/worker.pid" kill_service_by_pid "API Server" "logs/api.pid" - + # Kill any remaining processes on API port kill_port_processes $API_PORT "API Server" - + # Cleanup temporary files cleanup_temp_files - + # Remove log files if requested if [ "$1" = "--clean-logs" ]; then echo -e "${BLUE}🧹 Cleaning log files...${NC}" rm -f logs/*.log 2>/dev/null || true print_status "Log files cleaned" fi - + echo -e "${BLUE}========================================${NC}" echo -e "${GREEN}✅ All services stopped successfully!${NC}" echo -e "${BLUE}========================================${NC}" - + if [ "$1" != "--clean-logs" ]; then echo -e "${YELLOW}Log files preserved in logs/ directory${NC}" echo -e "${YELLOW}Use: ./scripts/stop.sh --clean-logs to remove them${NC}" diff --git a/static/index.html b/static/index.html index d6e3667..dcfed6f 100644 --- a/static/index.html +++ b/static/index.html @@ -38,13 +38,13 @@

[static/index.html: the hunk bodies from @@ -38,13 onward were garbled in extraction (HTML markup stripped; only visible text, hunk markers, and "- +" pairs survive). The recoverable fragments indicate whitespace-only cleanup across the Memory-Break Orchestrator page: the header and tagline ("Evaluate AI agent memory compression across iFlow, Claude, and Gemini"), the System Status bar, the "Create New Memory-Break Task" form, the Tasks list, the 🏆 Leaderboard, and the Agent Results panels (status, Stats, Artifacts), with hunks running through @@ -342,7.]