From 9caa8977e759468320090e88ce34eca50c997fcd Mon Sep 17 00:00:00 2001
From: James Braza
Date: Tue, 3 Sep 2024 08:55:18 -0700
Subject: [PATCH 1/5] Added basic tooling configs

---
 .gitignore              |  299 +++
 .mailmap                |    5 +
 .pre-commit-config.yaml |   67 ++
 .python-version         |    1 +
 .secrets.allowlist      |    0
 pyproject.toml          |  370 +++
 uv.lock                 | 1383 +++++++++++++++++++++++++++
 7 files changed, 2125 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 .mailmap
 create mode 100644 .pre-commit-config.yaml
 create mode 100644 .python-version
 create mode 100644 .secrets.allowlist
 create mode 100644 pyproject.toml
 create mode 100644 uv.lock

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..01a52fbd
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,299 @@
+# Swap (comment out the !*.svg negation below if you don't need vector files)
+[._]*.s[a-v][a-z]
+!*.svg
+[._]*.sw[a-p]
+[._]s[a-rt-v][a-z]
+[._]ss[a-gi-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+Sessionx.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
+# Persistent undo
+[._]*.un~
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# AWS User-specific
+.idea/**/aws.xml
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon[\r] + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
+#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ + +# Version files made by setuptools_scm +**/version.py diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..206df6cb --- /dev/null +++ b/.mailmap @@ -0,0 +1,5 @@ +Andrew White +James Braza +Michael Skarlinski mskarlin <12701035+mskarlin@users.noreply.github.com> +Ryan-Rhys Griffiths +Siddharth Narayanan diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..c0b97dcc --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,67 @@ +--- +default_language_version: + python: python3 +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: check-added-large-files + - id: check-byte-order-marker + - id: check-case-conflict + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: check-symlinks + - id: check-toml + - id: check-yaml + - id: debug-statements + - id: detect-private-key + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.6.3 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + - id: ruff-format + - repo: https://github.com/rbubley/mirrors-prettier + rev: v3.3.3 + hooks: + - id: prettier + - repo: https://github.com/Yelp/detect-secrets + rev: v1.5.0 + hooks: + - id: detect-secrets + additional_dependencies: [".[word_list]"] + args: + - --word-list=.secrets.allowlist + - --exclude-files=.secrets.baseline$ + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 3.0.0 + hooks: + - id: check-mailmap + - repo: https://github.com/codespell-project/codespell + rev: v2.3.0 + hooks: + - id: codespell + additional_dependencies: [".[toml]"] + exclude_types: [jupyter] + - repo: https://github.com/pappasam/toml-sort + rev: v0.23.1 + hooks: + - id: toml-sort-fix + - repo: https://github.com/srstevenson/nb-clean + rev: 3.3.0 + hooks: + - id: nb-clean + args: [--preserve-cell-outputs, --remove-empty-cells] + - repo: https://github.com/abravalheri/validate-pyproject + rev: v0.19 + hooks: + - id: validate-pyproject + additional_dependencies: + - "validate-pyproject-schema-store[all]>=2024.08.19" # For Ruff renaming RUF025 to C420 + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.11.2 + hooks: + - id: mypy diff --git a/.python-version b/.python-version new file mode 100644 index 
00000000..d9506ceb --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.12.5 diff --git a/.secrets.allowlist b/.secrets.allowlist new file mode 100644 index 00000000..e69de29b diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..c67f101a --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,370 @@ +[build-system] +build-backend = "setuptools.build_meta" +requires = ["setuptools>=64", "setuptools_scm>=8"] + +[project] +authors = [ + {email = "hello@futurehouse.org", name = "FutureHouse technical staff"}, +] +# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers +classifiers = [ + "Operating System :: OS Independent", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python", +] +dependencies = [ +] +description = "Agent framework for constructing language model agents and training on constructive tasks." +dynamic = ["version"] +name = "ldp" +readme = "README.md" +requires-python = ">=3.11" + +[project.urls] +issues = "https://github.com/Future-House/ldp/issues" +repository = "https://github.com/Future-House/ldp" + +[tool.black] +preview = true + +[tool.codeflash] +disable-imports-sorting = true +disable-telemetry = true +formatter-cmds = ["ruff check --exit-zero --fix $file", "ruff format $file"] +module-root = "ldp" +test-framework = "pytest" +tests-root = "tests" + +[tool.codespell] +check-filenames = true +check-hidden = true +ignore-words-list = "astroid,ser" + +[tool.mypy] +# Type-checks the interior of functions without type annotations. +check_untyped_defs = true +# Allows enabling one or multiple error codes globally. Note: This option will +# override disabled error codes from the disable_error_code option. +enable_error_code = [ + "ignore-without-code", + "redundant-cast", + "redundant-expr", + "redundant-self", + "truthy-bool", + "truthy-iterable", + "unimported-reveal", + "unreachable", + "unused-ignore", +] +# Shows a short summary line after error messages. +error_summary = false +# A regular expression that matches file names, directory names and paths which mypy +# should ignore while recursively discovering files to check. Use forward slashes (/) as +# directory separators on all platforms. +exclude = [ + "^\\.?venv", # SEE: https://regex101.com/r/0rp5Br/1 +] +# This flag tells mypy that top-level packages will be based in either the current +# directory, or a member of the MYPYPATH environment variable or mypy_path config +# option. This option is only useful in the absence of __init__.py. See Mapping file +# paths to modules for details. +explicit_package_bases = true +# Specifies the OS platform for the target program, for example darwin or win32 +# (meaning OS X or Windows, respectively). The default is the current platform +# as revealed by Python’s sys.platform variable. +platform = "linux" +# Comma-separated list of mypy plugins. +plugins = ["pydantic.mypy"] +# Use visually nicer output in error messages: use soft word wrap, show source +# code snippets, and show error location markers. +pretty = true +# Shows column numbers in error messages. +show_column_numbers = true +# Shows error codes in error messages. +# SEE: https://mypy.readthedocs.io/en/stable/error_codes.html#error-codes +show_error_codes = true +# Prefixes each error with the relevant context. +show_error_context = true +# Warns about casting an expression to its inferred type. 
+warn_redundant_casts = true +# Shows a warning when encountering any code inferred to be unreachable or +# redundant after performing type analysis. +warn_unreachable = true +# Warns about per-module sections in the config file that do not match any +# files processed when invoking mypy. +warn_unused_configs = true +# Warns about unneeded `# type: ignore` comments. +warn_unused_ignores = true + +[tool.pylint] + +[tool.pylint.design] +# Maximum number of attributes for a class (see R0902). +max-attributes = 12 + +[tool.pylint.format] +# Maximum number of characters on a single line. +max-line-length = 88 # Match ruff line-length + +[tool.pylint.main] +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs = 0 +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins = [ + "pylint_pydantic", +] + +[tool.pylint.messages_control] +# Disable the message, report, category or checker with the given id(s). +disable = [ + "arguments-differ", # Ops intentionally differ arguments + "attribute-defined-outside-init", # Disagrees with reset pattern + "bare-except", # Rely on ruff E722 for this + "broad-exception-caught", # Don't care to enforce this + "broad-exception-raised", # Rely on ruff TRY002 for this + "cyclic-import", # Let Python blow up + "expression-not-assigned", # Rely on mypy func-returns-value for this + "fixme", # codetags are useful + "function-redefined", # Rely on mypy no-redef for this + "import-outside-toplevel", # Rely on ruff PLC0415 for this + "invalid-name", # Don't care to enforce this + "line-too-long", # Rely on ruff E501 for this + "logging-fstring-interpolation", # f-strings are convenient + "logging-too-many-args", # Rely on ruff PLE1205 for this + "missing-docstring", # Let docformatter and ruff take care of docstrings + "missing-final-newline", # Rely on ruff W292 for this + "no-else-return", # Rely on ruff RET506 for this + "no-member", # Buggy, SEE: https://github.com/pylint-dev/pylint/issues/8138 + "not-callable", # Don't care to enforce this + "protected-access", # Don't care to enforce this + "raise-missing-from", # Rely on ruff B904 for this + "redefined-builtin", # Rely on ruff A002 for this + "super-init-not-called", # Don't care to enforce this + "too-few-public-methods", # Don't care to enforce this + "too-many-ancestors", # Don't care to enforce this + "too-many-arguments", # Don't care to enforce this + "too-many-branches", # Rely on ruff PLR0912 for this + "too-many-instance-attributes", # Don't care to enforce this + "too-many-lines", # Don't care to enforce this + "too-many-locals", # Rely on ruff PLR0914 for this + "too-many-return-statements", # Rely on ruff PLR0911 for this + "too-many-statements", # Rely on ruff PLR0915 for this + "ungrouped-imports", # Rely on ruff I001 for this + "unspecified-encoding", # Don't care to enforce this + "unsubscriptable-object", # Buggy, SEE: https://github.com/PyCQA/pylint/issues/3637 + "unsupported-membership-test", # Buggy, SEE: https://github.com/pylint-dev/pylint/issues/3045 + "unused-argument", # Rely on ruff ARG002 for this + "unused-import", # Rely on ruff F401 for this + "unused-variable", # Rely on ruff F841 for this + "wrong-import-order", # Rely on ruff I001 for this + "wrong-import-position", # Rely on ruff E402 for this +] +# Enable the message, report, category or checker with the given id(s). 
+enable = [
+    "useless-suppression",  # Print unused `pylint: disable` comments
+]
+
+[tool.pylint.reports]
+# Set true to activate the evaluation score.
+score = false
+
+[tool.pylint.similarities]
+# Minimum lines number of a similarity.
+min-similarity-lines = 12
+
+[tool.pytest.ini_options]
+# Add the specified OPTS to the set of command line arguments as if they had been
+# specified by the user.
+addopts = "--doctest-modules"
+# List of directories that should be searched for tests when no specific directories,
+# files or test ids are given in the command line when executing pytest from the rootdir
+# directory. File system paths may use shell-style wildcards, including the recursive **
+# pattern.
+testpaths = ["ldp", "tests"]
+
+[tool.refurb]
+enable_all = true
+ignore = [
+    "FURB101",  # FURB101, FURB103, FURB141, FURB144, FURB146, FURB147, FURB150, FURB155: no need for pathlib
+    "FURB103",
+    "FURB118",  # We often use inspect.signature in FxnOp. In 3.11, this doesn't work on operator.itemgetter.
+    "FURB141",
+    "FURB144",
+    "FURB146",
+    "FURB147",
+    "FURB150",
+    "FURB155",
+]
+
+[tool.ruff]
+# Line length to use when enforcing long-lines violations (like `E501`).
+line-length = 88
+# The minimum Python version to target, e.g., when considering automatic code
+# upgrades, like rewriting type annotations. Ruff will not propose changes
+# using features that are not available in the given version.
+target-version = "py311"
+# Enable application of unsafe fixes.
+unsafe-fixes = true
+
+[tool.ruff.format]
+# Enable reformatting of code snippets in docstrings.
+docstring-code-format = true
+# Enable preview style formatting.
+preview = true
+
+[tool.ruff.lint]
+explicit-preview-rules = true
+extend-select = [
+    "C420",
+    "DOC202",
+    "DOC403",
+    "FURB110",
+    "FURB113",
+    "FURB116",
+    "FURB131",
+    "FURB132",
+    "FURB140",
+    "FURB142",
+    "FURB145",
+    "FURB148",
+    "FURB152",
+    "FURB154",
+    "FURB157",
+    "FURB164",
+    "FURB166",
+    "FURB171",
+    "FURB180",
+    "FURB192",
+    "PLR6104",
+    "PLR6201",
+    "PLW0108",
+    "RUF022",
+]
+# List of rule codes that are unsupported by Ruff, but should be preserved when
+# (e.g.) validating # noqa directives. Useful for retaining # noqa directives
+# that cover plugins not yet implemented by Ruff.
+external = [
+    "FURB",  # refurb
+]
+ignore = [
+    "ANN",  # Don't care to enforce typing
+    "ARG002",  # Thrown all the time when we are subclassing
+    "ARG003",  # Thrown all the time when we are subclassing
+    "ASYNC109",  # Buggy, SEE: https://github.com/astral-sh/ruff/issues/12353
+    "ASYNC2",  # It's ok to mix async and sync ops (like opening a file)
+    "BLE001",  # Don't care to enforce blind exception catching
+    "COM812",  # Trailing comma with black leads to wasting lines
+    "D100",  # D100, D101, D102, D103, D104, D105, D106, D107: don't always need docstrings
+    "D101",
+    "D102",
+    "D103",
+    "D104",
+    "D105",
+    "D106",
+    "D107",
+    "D203",  # Keep docstring next to the class definition (covered by D211)
+    "D212",  # Summary should be on second line (opposite of D213)
+    "D402",  # It's nice to reuse the method name
+    "D406",  # Google style requires ":" at end
+    "D407",  # We aren't using numpy style
+    "D413",  # Blank line after last section. -> No blank line
+    "DTZ",  # Don't care to have timezone safety
+    "EM",  # Overly pedantic
+    "ERA001",  # Don't care to prevent commented code
+    "FBT001",  # FBT001, FBT002: overly pedantic
+    "FBT002",
+    "FIX",  # Don't care to prevent TODO, FIXME, etc.
+ "FLY002", # Can be less readable + "G004", # f-strings are convenient + "INP001", # Can use namespace packages + "ISC001", # For ruff format compatibility + "N803", # Allow matching math formula names/conventions + "N806", # Allow matching math formula names/conventions + "PLC0415", # Don't care to prevent imports outside of top-level + "PLR0912", # Allow us to have lots of branches + "PLR0913", + "PLW2901", # Allow modifying loop variables + "PTH", # Overly pedantic + "S101", # Don't care to prevent asserts + "S105", # Duplicates Yelp/detect-secrets in pre-commit + "S311", # Ok to use python random + "SLF001", # Overly pedantic + "T201", # Overly pedantic + "TCH001", # TCH001, TCH002, TCH003: don't care to enforce type checking blocks + "TCH002", + "TCH003", + "TD002", # Don't care for TODO author + "TD003", # Don't care for TODO links + "TRY003", # Overly pedantic +] +preview = true +select = ["ALL"] +unfixable = [ + "B007", # While debugging, unused loop variables can be useful + "B905", # Default fix is zip(strict=False), but that can hide bugs + "ERA001", # While debugging, temporarily commenting code can be useful + "F401", # While debugging, unused imports can be useful + "F841", # While debugging, unused locals can be useful +] + +[tool.ruff.lint.flake8-annotations] +mypy-init-return = true + +[tool.ruff.lint.per-file-ignores] +"**/tests/*.py" = [ + "E501", # Tests can have long strings + "F841", # Tests can have unused locals + "N802", # Tests function names can match class names + "PLR2004", # Tests can have magic values +] + +[tool.ruff.lint.pycodestyle] +# The maximum line length to allow for line-length violations within +# documentation (W505), including standalone comments. +max-doc-length = 120 # Match line-length +# The maximum line length to allow for line-too-long violations. By default, +# this is set to the value of the line-length option. +max-line-length = 120 + +[tool.ruff.lint.pydocstyle] +# Whether to use Google-style or NumPy-style conventions or the PEP257 +# defaults when analyzing docstring sections. 
+convention = "google" + +[tool.setuptools.packages.find] +include = ["ldp*"] + +[tool.setuptools_scm] +version_file = "ldp/version.py" + +[tool.tomlsort] +all = true +in_place = true +spaces_before_inline_comment = 2 # Match Python PEP 8 +spaces_indent_inline_array = 4 # Match Python PEP 8 +trailing_comma_inline_array = true + +[tool.uv] +dev-dependencies = [ + "SQLAlchemy[aiosqlite]~=2.0", # Match aviary dependencies + "build", # TODO: remove after https://github.com/astral-sh/uv/issues/6278 + "codeflash", + "ipython>=8", # Pin to keep recent + "mypy>=1.8", # Pin for mutable-override + "pre-commit~=3.4", + "pylint-pydantic", + "pylint>=3.2", + "pytest-asyncio", + "pytest-subtests", + "pytest-sugar", + "pytest-timer[colorama]", + "pytest-xdist", + "pytest>=8", # Pin to keep recent + "refurb>=2", # Pin to keep recent + "typeguard", +] diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..82e43446 --- /dev/null +++ b/uv.lock @@ -0,0 +1,1383 @@ +version = 1 +requires-python = ">=3.11" +resolution-markers = [ + "python_full_version < '3.12'", + "python_full_version < '3.13'", + "python_full_version >= '3.13'", +] + +[[package]] +name = "aiosqlite" +version = "0.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0d/3a/22ff5415bf4d296c1e92b07fd746ad42c96781f13295a074d58e77747848/aiosqlite-0.20.0.tar.gz", hash = "sha256:6d35c8c256637f4672f843c31021464090805bf925385ac39473fb16eaaca3d7", size = 21691 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c4/c93eb22025a2de6b83263dfe3d7df2e19138e345bca6f18dba7394120930/aiosqlite-0.20.0-py3-none-any.whl", hash = "sha256:36a1deaca0cac40ebe32aac9977a6e2bbc7f5189f23f4a54d5908986729e5bd6", size = 15564 }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, +] + +[[package]] +name = "ansicon" +version = "1.89.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b6/e2/1c866404ddbd280efedff4a9f15abfe943cb83cde6e895022370f3a61f85/ansicon-1.89.0.tar.gz", hash = "sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1", size = 67312 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/75/f9/f1c10e223c7b56a38109a3f2eb4e7fe9a757ea3ed3a166754fb30f65e466/ansicon-1.89.0-py2.py3-none-any.whl", hash = "sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec", size = 63675 }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348 }, +] + +[[package]] +name = "asttokens" +version = "2.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/45/1d/f03bcb60c4a3212e15f99a56085d93093a497718adf828d050b9d675da81/asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0", size = 62284 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/86/4736ac618d82a20d87d2f92ae19441ebc7ac9e7a581d7e58bbe79233b24a/asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24", size = 27764 }, +] + +[[package]] +name = "attrs" +version = "24.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/0f/aafca9af9315aee06a89ffde799a10a582fe8de76c563ee80bbcdc08b3fb/attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346", size = 792678 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/21/5b6702a7f963e95456c0de2d495f67bf5fd62840ac655dc451586d23d39a/attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2", size = 63001 }, +] + +[[package]] +name = "backoff" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/d7/5bbeb12c44d7c4f2fb5b56abce497eb5ed9f34d85701de869acedd602619/backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba", size = 17001 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148 }, +] + +[[package]] +name = "black" +version = "24.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/b0/46fb0d4e00372f4a86a6f8efa3cb193c9f64863615e39010b1477e010578/black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f", size = 644810 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/a6/0a3aa89de9c283556146dc6dbda20cd63a9c94160a6fbdebaf0918e4a3e1/black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1", size = 1615080 }, + { url = "https://files.pythonhosted.org/packages/db/94/b803d810e14588bb297e565821a947c108390a079e21dbdcb9ab6956cd7a/black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af", size = 1438143 }, + { url = "https://files.pythonhosted.org/packages/a5/b5/f485e1bbe31f768e2e5210f52ea3f432256201289fd1a3c0afda693776b0/black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4", size = 1738774 }, + { url = 
"https://files.pythonhosted.org/packages/a8/69/a000fc3736f89d1bdc7f4a879f8aaf516fb03613bb51a0154070383d95d9/black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af", size = 1427503 }, + { url = "https://files.pythonhosted.org/packages/a2/a8/05fb14195cfef32b7c8d4585a44b7499c2a4b205e1662c427b941ed87054/black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368", size = 1646132 }, + { url = "https://files.pythonhosted.org/packages/41/77/8d9ce42673e5cb9988f6df73c1c5c1d4e9e788053cccd7f5fb14ef100982/black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed", size = 1448665 }, + { url = "https://files.pythonhosted.org/packages/cc/94/eff1ddad2ce1d3cc26c162b3693043c6b6b575f538f602f26fe846dfdc75/black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018", size = 1762458 }, + { url = "https://files.pythonhosted.org/packages/28/ea/18b8d86a9ca19a6942e4e16759b2fa5fc02bbc0eb33c1b866fcd387640ab/black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2", size = 1436109 }, + { url = "https://files.pythonhosted.org/packages/27/1e/83fa8a787180e1632c3d831f7e58994d7aaf23a0961320d21e84f922f919/black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed", size = 206504 }, +] + +[[package]] +name = "blessed" +version = "1.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinxed", marker = "platform_system == 'Windows'" }, + { name = "six" }, + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/ae/92e9968ad23205389ec6bd82e2d4fca3817f1cdef34e10aa8d529ef8b1d7/blessed-1.20.0.tar.gz", hash = "sha256:2cdd67f8746e048f00df47a2880f4d6acbcdb399031b604e34ba8f71d5787680", size = 6655612 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/98/584f211c3a4bb38f2871fa937ee0cc83c130de50c955d6c7e2334dbf4acb/blessed-1.20.0-py2.py3-none-any.whl", hash = "sha256:0c542922586a265e699188e52d5f5ac5ec0dd517e5a1041d90d2bbf23f906058", size = 58372 }, +] + +[[package]] +name = "build" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "os_name == 'nt'" }, + { name = "packaging" }, + { name = "pyproject-hooks" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/9e/2d725d2f7729c6e79ca62aeb926492abbc06e25910dd30139d60a68bcb19/build-1.2.1.tar.gz", hash = "sha256:526263f4870c26f26c433545579475377b2b7588b6f1eac76a001e873ae3e19d", size = 44781 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/03/f3c8ba0a6b6e30d7d18c40faab90807c9bb5e9a1e3b2fe2008af624a9c97/build-1.2.1-py3-none-any.whl", hash = "sha256:75e10f767a433d9a86e50d83f418e83efc18ede923ee5ff7df93b6cb0306c5d4", size = 21911 }, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/ee/9b19140fe824b367c04c5e1b369942dd754c4c5462d5674002f75c4dedc1/certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9", size = 168507 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/12/90/3c9ff0512038035f59d279fddeb79f5f1eccd8859f06d6163c58798b9487/certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", size = 167321 }, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/09/c1bc53dab74b1816a00d8d030de5bf98f724c52c1635e07681d312f20be8/charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", size = 104809 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/77/02839016f6fbbf808e8b38601df6e0e66c17bbab76dff4613f7511413597/charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", size = 191647 }, + { url = "https://files.pythonhosted.org/packages/3e/33/21a875a61057165e92227466e54ee076b73af1e21fe1b31f1e292251aa1e/charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", size = 121434 }, + { url = "https://files.pythonhosted.org/packages/dd/51/68b61b90b24ca35495956b718f35a9756ef7d3dd4b3c1508056fa98d1a1b/charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", size = 118979 }, + { url = "https://files.pythonhosted.org/packages/e4/a6/7ee57823d46331ddc37dd00749c95b0edec2c79b15fc0d6e6efb532e89ac/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", size = 136582 }, + { url = "https://files.pythonhosted.org/packages/74/f1/0d9fe69ac441467b737ba7f48c68241487df2f4522dd7246d9426e7c690e/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", size = 146645 }, + { url = "https://files.pythonhosted.org/packages/05/31/e1f51c76db7be1d4aef220d29fbfa5dbb4a99165d9833dcbf166753b6dc0/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", size = 139398 }, + { url = "https://files.pythonhosted.org/packages/40/26/f35951c45070edc957ba40a5b1db3cf60a9dbb1b350c2d5bef03e01e61de/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", size = 140273 }, + { url = "https://files.pythonhosted.org/packages/07/07/7e554f2bbce3295e191f7e653ff15d55309a9ca40d0362fcdab36f01063c/charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", size = 142577 }, + { url = "https://files.pythonhosted.org/packages/d8/b5/eb705c313100defa57da79277d9207dc8d8e45931035862fa64b625bfead/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", size = 137747 }, + { url = "https://files.pythonhosted.org/packages/19/28/573147271fd041d351b438a5665be8223f1dd92f273713cb882ddafe214c/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", size = 143375 }, + { url = "https://files.pythonhosted.org/packages/cf/7c/f3b682fa053cc21373c9a839e6beba7705857075686a05c72e0f8c4980ca/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", size = 148474 }, + { url = "https://files.pythonhosted.org/packages/1e/49/7ab74d4ac537ece3bc3334ee08645e231f39f7d6df6347b29a74b0537103/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", size = 140232 }, + { url = "https://files.pythonhosted.org/packages/2d/dc/9dacba68c9ac0ae781d40e1a0c0058e26302ea0660e574ddf6797a0347f7/charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", size = 140859 }, + { url = "https://files.pythonhosted.org/packages/6c/c2/4a583f800c0708dd22096298e49f887b49d9746d0e78bfc1d7e29816614c/charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", size = 92509 }, + { url = "https://files.pythonhosted.org/packages/57/ec/80c8d48ac8b1741d5b963797b7c0c869335619e13d4744ca2f67fc11c6fc/charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", size = 99870 }, + { url = "https://files.pythonhosted.org/packages/d1/b2/fcedc8255ec42afee97f9e6f0145c734bbe104aac28300214593eb326f1d/charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", size = 192892 }, + { url = "https://files.pythonhosted.org/packages/2e/7d/2259318c202f3d17f3fe6438149b3b9e706d1070fe3fcbb28049730bb25c/charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", size = 122213 }, + { url = "https://files.pythonhosted.org/packages/3a/52/9f9d17c3b54dc238de384c4cb5a2ef0e27985b42a0e5cc8e8a31d918d48d/charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", size = 119404 }, + { url = "https://files.pythonhosted.org/packages/99/b0/9c365f6d79a9f0f3c379ddb40a256a67aa69c59609608fe7feb6235896e1/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", size = 137275 }, + { url = "https://files.pythonhosted.org/packages/91/33/749df346e93d7a30cdcb90cbfdd41a06026317bfbfb62cd68307c1a3c543/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", size = 147518 }, + { url = 
"https://files.pythonhosted.org/packages/72/1a/641d5c9f59e6af4c7b53da463d07600a695b9824e20849cb6eea8a627761/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", size = 140182 }, + { url = "https://files.pythonhosted.org/packages/ee/fb/14d30eb4956408ee3ae09ad34299131fb383c47df355ddb428a7331cfa1e/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", size = 141869 }, + { url = "https://files.pythonhosted.org/packages/df/3e/a06b18788ca2eb6695c9b22325b6fde7dde0f1d1838b1792a0076f58fe9d/charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", size = 144042 }, + { url = "https://files.pythonhosted.org/packages/45/59/3d27019d3b447a88fe7e7d004a1e04be220227760264cc41b405e863891b/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", size = 138275 }, + { url = "https://files.pythonhosted.org/packages/7b/ef/5eb105530b4da8ae37d506ccfa25057961b7b63d581def6f99165ea89c7e/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", size = 144819 }, + { url = "https://files.pythonhosted.org/packages/a2/51/e5023f937d7f307c948ed3e5c29c4b7a3e42ed2ee0b8cdf8f3a706089bf0/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", size = 149415 }, + { url = "https://files.pythonhosted.org/packages/24/9d/2e3ef673dfd5be0154b20363c5cdcc5606f35666544381bee15af3778239/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", size = 141212 }, + { url = "https://files.pythonhosted.org/packages/5b/ae/ce2c12fcac59cb3860b2e2d76dc405253a4475436b1861d95fe75bdea520/charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", size = 142167 }, + { url = "https://files.pythonhosted.org/packages/ed/3a/a448bf035dce5da359daf9ae8a16b8a39623cc395a2ffb1620aa1bce62b0/charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", size = 93041 }, + { url = "https://files.pythonhosted.org/packages/b6/7c/8debebb4f90174074b827c63242c23851bdf00a532489fba57fef3416e40/charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", size = 100397 }, + { url = "https://files.pythonhosted.org/packages/28/76/e6222113b83e3622caa4bb41032d0b1bf785250607392e1b778aca0b8a7d/charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", size = 48543 }, +] + +[[package]] +name = "click" +version = "8.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 
336121 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 }, +] + +[[package]] +name = "codeflash" +version = "0.6.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "black" }, + { name = "click" }, + { name = "dill" }, + { name = "gitpython" }, + { name = "humanize" }, + { name = "inquirer" }, + { name = "isort" }, + { name = "jedi" }, + { name = "junitparser" }, + { name = "libcst" }, + { name = "parameterized" }, + { name = "posthog" }, + { name = "pydantic" }, + { name = "pytest" }, + { name = "pytest-timeout" }, + { name = "returns" }, + { name = "sentry-sdk" }, + { name = "tiktoken" }, + { name = "timeout-decorator" }, + { name = "tomlkit" }, + { name = "unidiff" }, + { name = "unittest-xml-reporting" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/05/06b72652b056619e076c3cd9dd9cfc00fd5b1d291af5e9a00168041aaddf/codeflash-0.6.21.tar.gz", hash = "sha256:d523986f99960fbc6418bb68ac709f3d559ae30ea546737987ad94f2b4a417da", size = 80215 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/af/8fd2225cef290dd6f5018f63935905b37f10b08004718e7edc71463e658d/codeflash-0.6.21-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:960423b33ab31eb01ac030989de4fa0c78e9dac136a58dac1b8434d5953baaf1", size = 96714 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "decorator" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, +] + +[[package]] +name = "dill" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/17/4d/ac7ffa80c69ea1df30a8aa11b3578692a5118e7cd1aa157e3ef73b092d15/dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", size = 184847 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252 }, +] + +[[package]] +name = "distlib" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/c4/91/e2df406fb4efacdf46871c25cde65d3c6ee5e173b7e5a4547a47bae91920/distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64", size = 609931 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/41/9307e4f5f9976bc8b7fea0b66367734e8faf3ec84bc0d412d8cfabbb66cd/distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784", size = 468850 }, +] + +[[package]] +name = "editor" +version = "1.6.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "runs" }, + { name = "xmod" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/92/734a4ab345914259cb6146fd36512608ea42be16195375c379046f33283d/editor-1.6.6.tar.gz", hash = "sha256:bb6989e872638cd119db9a4fce284cd8e13c553886a1c044c6b8d8a160c871f8", size = 3197 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/c2/4bc8cd09b14e28ce3f406a8b05761bed0d785d1ca8c2a5c6684d884c66a2/editor-1.6.6-py3-none-any.whl", hash = "sha256:e818e6913f26c2a81eadef503a2741d7cca7f235d20e217274a009ecd5a74abf", size = 4017 }, +] + +[[package]] +name = "execnet" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612 }, +] + +[[package]] +name = "executing" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/e3/7d45f492c2c4a0e8e0fad57d081a7c8a0286cdd86372b070cca1ec0caa1e/executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab", size = 977485 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/fd/afcd0496feca3276f509df3dbd5dae726fcc756f1a08d9e25abe1733f962/executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", size = 25805 }, +] + +[[package]] +name = "filelock" +version = "3.15.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/08/dd/49e06f09b6645156550fb9aee9cc1e59aba7efbc972d665a1bd6ae0435d4/filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb", size = 18007 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/f0/48285f0262fe47103a4a45972ed2f9b93e4c80b8fd609fa98da78b2a5706/filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7", size = 16159 }, +] + +[[package]] +name = "gitdb" +version = "4.0.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "smmap" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/0d/bbb5b5ee188dec84647a4664f3e11b06ade2bde568dbd489d9d64adef8ed/gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b", size = 394469 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/fd/5b/8f0c4a5bb9fd491c277c21eff7ccae71b47d43c4446c9d0c6cff2fe8c2c4/gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4", size = 62721 }, +] + +[[package]] +name = "gitpython" +version = "3.1.43" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gitdb" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b6/a1/106fd9fa2dd989b6fb36e5893961f82992cf676381707253e0bf93eb1662/GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c", size = 214149 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/bd/cc3a402a6439c15c3d4294333e13042b915bbeab54edc457c723931fed3f/GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff", size = 207337 }, +] + +[[package]] +name = "greenlet" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/17/14/3bddb1298b9a6786539ac609ba4b7c9c0842e12aa73aaa4d8d73ec8f8185/greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491", size = 182013 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/20/68a278a6f93fa36e21cfc3d7599399a8a831225644eb3b6b18755cd3d6fc/greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61", size = 271666 }, + { url = "https://files.pythonhosted.org/packages/21/b4/90e06e07c78513ab03855768200bdb35c8e764e805b3f14fb488e56f82dc/greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559", size = 657689 }, + { url = "https://files.pythonhosted.org/packages/f6/a2/0ed21078039072f9dc738bbf3af12b103a84106b1385ac4723841f846ce7/greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e", size = 673009 }, + { url = "https://files.pythonhosted.org/packages/42/11/42ad6b1104c357826bbee7d7b9e4f24dbd9fde94899a03efb004aab62963/greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33", size = 667432 }, + { url = "https://files.pythonhosted.org/packages/bb/6b/384dee7e0121cbd1757bdc1824a5ee28e43d8d4e3f99aa59521f629442fe/greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379", size = 667442 }, + { url = "https://files.pythonhosted.org/packages/c6/1f/12d5a6cc26e8b483c2e7975f9c22e088ac735c0d8dcb8a8f72d31a4e5f04/greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22", size = 620032 }, + { url = "https://files.pythonhosted.org/packages/c7/ec/85b647e59e0f137c7792a809156f413e38379cf7f3f2e1353c37f4be4026/greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3", size = 1154218 }, + { url = "https://files.pythonhosted.org/packages/94/ed/1e5f4bca691a81700e5a88e86d6f0e538acb10188cd2cc17140e523255ef/greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d", size = 1180754 
}, + { url = "https://files.pythonhosted.org/packages/47/79/26d54d7d700ef65b689fc2665a40846d13e834da0486674a8d4f0f371a47/greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728", size = 292822 }, + { url = "https://files.pythonhosted.org/packages/a2/2f/461615adc53ba81e99471303b15ac6b2a6daa8d2a0f7f77fd15605e16d5b/greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be", size = 273085 }, + { url = "https://files.pythonhosted.org/packages/e9/55/2c3cfa3cdbb940cf7321fbcf544f0e9c74898eed43bf678abf416812d132/greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e", size = 660514 }, + { url = "https://files.pythonhosted.org/packages/38/77/efb21ab402651896c74f24a172eb4d7479f9f53898bd5e56b9e20bb24ffd/greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676", size = 674295 }, + { url = "https://files.pythonhosted.org/packages/74/3a/92f188ace0190f0066dca3636cf1b09481d0854c46e92ec5e29c7cefe5b1/greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc", size = 669395 }, + { url = "https://files.pythonhosted.org/packages/63/0f/847ed02cdfce10f0e6e3425cd054296bddb11a17ef1b34681fa01a055187/greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230", size = 670455 }, + { url = "https://files.pythonhosted.org/packages/bd/37/56b0da468a85e7704f3b2bc045015301bdf4be2184a44868c71f6dca6fe2/greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf", size = 625692 }, + { url = "https://files.pythonhosted.org/packages/7c/68/b5f4084c0a252d7e9c0d95fc1cfc845d08622037adb74e05be3a49831186/greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305", size = 1152597 }, + { url = "https://files.pythonhosted.org/packages/a4/fa/31e22345518adcd69d1d6ab5087a12c178aa7f3c51103f6d5d702199d243/greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6", size = 1181043 }, + { url = "https://files.pythonhosted.org/packages/53/80/3d94d5999b4179d91bcc93745d1b0815b073d61be79dd546b840d17adb18/greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2", size = 293635 }, +] + +[[package]] +name = "humanize" +version = "4.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5d/b1/c8f05d5dc8f64030d8cc71e91307c1daadf6ec0d70bcd6eabdfd9b6f153f/humanize-4.10.0.tar.gz", hash = "sha256:06b6eb0293e4b85e8d385397c5868926820db32b9b654b932f57fa41c23c9978", size = 79192 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/49/a29c79bea335e52fb512a43faf84998c184c87fef82c65f568f8c56f2642/humanize-4.10.0-py3-none-any.whl", hash = "sha256:39e7ccb96923e732b5c2e27aeaa3b10a8dfeeba3eb965ba7b74a3eb0e30040a6", size = 126957 }, +] + +[[package]] +name = "identify" +version = "2.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/32/f4/8e8f7db397a7ce20fbdeac5f25adaf567fc362472432938d25556008e03a/identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf", size = 99116 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/6c/a4f39abe7f19600b74528d0c717b52fff0b300bb0161081510d39c53cb00/identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0", size = 98962 }, +] + +[[package]] +name = "idna" +version = "3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/ac/e349c5e6d4543326c6883ee9491e3921e0d07b55fdf3cce184b40d63e72a/idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603", size = 189467 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/7e/d71db821f177828df9dea8c42ac46473366f191be53080e552e628aad991/idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac", size = 66894 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "inquirer" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blessed" }, + { name = "editor" }, + { name = "readchar" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/06/ef91eb8f3feafb736aa33dcb278fc9555d17861aa571b684715d095db24d/inquirer-3.4.0.tar.gz", hash = "sha256:8edc99c076386ee2d2204e5e3653c2488244e82cb197b2d498b3c1b5ffb25d0b", size = 14472 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/b2/be907c8c0f8303bc4b10089f5470014c3bf3521e9b8d3decf3037fd94725/inquirer-3.4.0-py3-none-any.whl", hash = "sha256:bb0ec93c833e4ce7b51b98b1644b0a4d2bb39755c39787f6a504e4fee7a11b60", size = 18077 }, +] + +[[package]] +name = "ipython" +version = "8.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/24/d4fabaca03c8804bf0b8d994c8ae3a20e57e9330d277fb43d83e558dec5e/ipython-8.27.0.tar.gz", hash = "sha256:0b99a2dc9f15fd68692e898e5568725c6d49c527d36a9fb5960ffbdeaa82ff7e", size = 5494984 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a2/6c725958e6f135d8e5de081e69841bb2c1d84b3fc259d02eb092b8fc203a/ipython-8.27.0-py3-none-any.whl", hash = "sha256:f68b3cb8bde357a5d7adc9598d57e22a45dfbea19eb6b98286fa3b288c9cd55c", size = 818986 }, +] + +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310 }, +] + +[[package]] +name = "jedi" +version = "0.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/99/99b493cec4bf43176b678de30f81ed003fd6a647a301b9c927280c600f0a/jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd", size = 1227821 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/9f/bc63f0f0737ad7a60800bfd472a4836661adae21f9c2535f3957b1e54ceb/jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0", size = 1569361 }, +] + +[[package]] +name = "jinxed" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ansicon", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/d0/59b2b80e7a52d255f9e0ad040d2e826342d05580c4b1d7d7747cfb8db731/jinxed-1.3.0.tar.gz", hash = "sha256:1593124b18a41b7a3da3b078471442e51dbad3d77b4d4f2b0c26ab6f7d660dbf", size = 80981 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/e3/0e0014d6ab159d48189e92044ace13b1e1fe9aa3024ba9f4e8cf172aa7c2/jinxed-1.3.0-py2.py3-none-any.whl", hash = "sha256:b993189f39dc2d7504d802152671535b06d380b26d78070559551cbf92df4fc5", size = 33085 }, +] + +[[package]] +name = "junitparser" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/88/6a268028a297751ed73be8e291f12aa727caf22adbc218e8dfbafcc974af/junitparser-3.2.0.tar.gz", hash = "sha256:b05e89c27e7b74b3c563a078d6e055d95cf397444f8f689b0ca616ebda0b3c65", size = 20073 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/f9/321d566c9f2af81fdb4bb3d5900214116b47be9e26b82219da8b818d9da9/junitparser-3.2.0-py2.py3-none-any.whl", hash = "sha256:e14fdc0a999edfc15889b637390e8ef6ca09a49532416d3bd562857d42d4b96d", size = 13394 }, +] + +[[package]] +name = "ldp" +version = "0.1.dev1+g713bff3.d20240903" +source = { editable = "." 
} + +[package.dev-dependencies] +dev = [ + { name = "build" }, + { name = "codeflash" }, + { name = "ipython" }, + { name = "mypy" }, + { name = "pre-commit" }, + { name = "pylint" }, + { name = "pylint-pydantic" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-subtests" }, + { name = "pytest-sugar" }, + { name = "pytest-timer", extra = ["colorama"] }, + { name = "pytest-xdist" }, + { name = "refurb" }, + { name = "sqlalchemy", extra = ["aiosqlite"] }, + { name = "typeguard" }, +] + +[package.metadata] + +[package.metadata.requires-dev] +dev = [ + { name = "build" }, + { name = "codeflash" }, + { name = "ipython", specifier = ">=8" }, + { name = "mypy", specifier = ">=1.8" }, + { name = "pre-commit", specifier = "~=3.4" }, + { name = "pylint", specifier = ">=3.2" }, + { name = "pylint-pydantic" }, + { name = "pytest", specifier = ">=8" }, + { name = "pytest-asyncio" }, + { name = "pytest-subtests" }, + { name = "pytest-sugar" }, + { name = "pytest-timer", extras = ["colorama"] }, + { name = "pytest-xdist" }, + { name = "refurb", specifier = ">=2" }, + { name = "sqlalchemy", extras = ["aiosqlite"], specifier = "~=2.0" }, + { name = "typeguard" }, +] + +[[package]] +name = "libcst" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e4/bd/ff41d7a8efc4f60a61d903c3f9823565006f44f2b8b11c99701f552b0851/libcst-1.4.0.tar.gz", hash = "sha256:449e0b16604f054fa7f27c3ffe86ea7ef6c409836fe68fe4e752a1894175db00", size = 771364 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/2c/6bf8e4710afe1e0d45643e3726c0a956f5965555425cd7efa31e97cc7a6b/libcst-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6227562fc5c9c1efd15dfe90b0971ae254461b8b6b23c1b617139b6003de1c1", size = 2110723 }, + { url = "https://files.pythonhosted.org/packages/5d/82/652e041aa6e14751a2ce41e68e281d9d5a32864ba11a363e103c429bf0e8/libcst-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3399e6c95df89921511b44d8c5bf6a75bcbc2d51f1f6429763609ba005c10f6b", size = 2036982 }, + { url = "https://files.pythonhosted.org/packages/b8/d7/515b6187a900033467a4001bf8e2ed95f4961aa9bedf2bf39dfd68659157/libcst-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48601e3e590e2d6a7ab8c019cf3937c70511a78d778ab3333764531253acdb33", size = 2199286 }, + { url = "https://files.pythonhosted.org/packages/50/a1/2093f74a3f8936fcdaac01f86d1c5fa8f586202afa466a92332b9a461b14/libcst-1.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42797309bb725f0f000510d5463175ccd7155395f09b5e7723971b0007a976d", size = 2251591 }, + { url = "https://files.pythonhosted.org/packages/0a/6c/1eb258b0eba8f337e1e9bd40574247310670c036a3913c9b650d6d9cd4de/libcst-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb4e42ea107a37bff7f9fdbee9532d39f9ea77b89caa5c5112b37057b12e0838", size = 2335434 }, + { url = "https://files.pythonhosted.org/packages/6a/56/1c5a8385e9cc2d95d278cb8df48d11587c1c93b3b78c2edafd16b2bf11fa/libcst-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:9d0cc3c5a2a51fa7e1d579a828c0a2e46b2170024fd8b1a0691c8a52f3abb2d9", size = 2029195 }, + { url = "https://files.pythonhosted.org/packages/2f/09/e4374c8e9bde82a6197860b67ed0b0cd07c0fbc95fff035886382165a279/libcst-1.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7ece51d935bc9bf60b528473d2e5cc67cbb88e2f8146297e40ee2c7d80be6f13", size 
= 2106058 }, + { url = "https://files.pythonhosted.org/packages/61/8a/84810ea960ede8d15266cc5e135165d92aadb08956136e53926b3e037829/libcst-1.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:81653dea1cdfa4c6520a7c5ffb95fa4d220cbd242e446c7a06d42d8636bfcbba", size = 2032124 }, + { url = "https://files.pythonhosted.org/packages/08/1d/3e2ab936e4195df82b764b02631a865b65dcf252772ddfe5265d384a883d/libcst-1.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6abce0e66bba2babfadc20530fd3688f672d565674336595b4623cd800b91ef", size = 2195173 }, + { url = "https://files.pythonhosted.org/packages/11/38/30206bbcf31425f6fd01dae3cf23e35df790969243d39757ae743d8e6d67/libcst-1.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da9d7dc83801aba3b8d911f82dc1a375db0d508318bad79d9fb245374afe068", size = 2248523 }, + { url = "https://files.pythonhosted.org/packages/8c/02/1c9c908724c732f09b11493ee5d61893060ecc9a3dc4bc80032d1be87b37/libcst-1.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c54aa66c86d8ece9c93156a2cf5ca512b0dce40142fe9e072c86af2bf892411", size = 2326040 }, + { url = "https://files.pythonhosted.org/packages/04/32/7345f10a2dc728015920d689d5c1b8dc0232db321e172cdad2611e73c5b3/libcst-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:62e2682ee1567b6a89c91853865372bf34f178bfd237853d84df2b87b446e654", size = 2026263 }, +] + +[[package]] +name = "lxml" +version = "5.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/6b/20c3a4b24751377aaa6307eb230b66701024012c29dd374999cc92983269/lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f", size = 3679318 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/a8/449faa2a3cbe6a99f8d38dcd51a3ee8844c17862841a6f769ea7c2a9cd0f/lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b", size = 8141056 }, + { url = "https://files.pythonhosted.org/packages/ac/8a/ae6325e994e2052de92f894363b038351c50ee38749d30cc6b6d96aaf90f/lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18", size = 4425238 }, + { url = "https://files.pythonhosted.org/packages/f8/fb/128dddb7f9086236bce0eeae2bfb316d138b49b159f50bc681d56c1bdd19/lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442", size = 5095197 }, + { url = "https://files.pythonhosted.org/packages/b4/f9/a181a8ef106e41e3086629c8bdb2d21a942f14c84a0e77452c22d6b22091/lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4", size = 4809809 }, + { url = "https://files.pythonhosted.org/packages/25/2f/b20565e808f7f6868aacea48ddcdd7e9e9fb4c799287f21f1a6c7c2e8b71/lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f", size = 5407593 }, + { url = "https://files.pythonhosted.org/packages/23/0e/caac672ec246d3189a16c4d364ed4f7d6bf856c080215382c06764058c08/lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e", size = 4866657 }, + { url = 
"https://files.pythonhosted.org/packages/67/a4/1f5fbd3f58d4069000522196b0b776a014f3feec1796da03e495cf23532d/lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c", size = 4967017 }, + { url = "https://files.pythonhosted.org/packages/ee/73/623ecea6ca3c530dd0a4ed0d00d9702e0e85cd5624e2d5b93b005fe00abd/lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16", size = 4810730 }, + { url = "https://files.pythonhosted.org/packages/1d/ce/fb84fb8e3c298f3a245ae3ea6221c2426f1bbaa82d10a88787412a498145/lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79", size = 5455154 }, + { url = "https://files.pythonhosted.org/packages/b1/72/4d1ad363748a72c7c0411c28be2b0dc7150d91e823eadad3b91a4514cbea/lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080", size = 4969416 }, + { url = "https://files.pythonhosted.org/packages/42/07/b29571a58a3a80681722ea8ed0ba569211d9bb8531ad49b5cacf6d409185/lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654", size = 5013672 }, + { url = "https://files.pythonhosted.org/packages/b9/93/bde740d5a58cf04cbd38e3dd93ad1e36c2f95553bbf7d57807bc6815d926/lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d", size = 4878644 }, + { url = "https://files.pythonhosted.org/packages/56/b5/645c8c02721d49927c93181de4017164ec0e141413577687c3df8ff0800f/lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763", size = 5511531 }, + { url = "https://files.pythonhosted.org/packages/85/3f/6a99a12d9438316f4fc86ef88c5d4c8fb674247b17f3173ecadd8346b671/lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec", size = 5402065 }, + { url = "https://files.pythonhosted.org/packages/80/8a/df47bff6ad5ac57335bf552babfb2408f9eb680c074ec1ba412a1a6af2c5/lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be", size = 5069775 }, + { url = "https://files.pythonhosted.org/packages/08/ae/e7ad0f0fbe4b6368c5ee1e3ef0c3365098d806d42379c46c1ba2802a52f7/lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9", size = 3474226 }, + { url = "https://files.pythonhosted.org/packages/c3/b5/91c2249bfac02ee514ab135e9304b89d55967be7e53e94a879b74eec7a5c/lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1", size = 3814971 }, + { url = "https://files.pythonhosted.org/packages/eb/6d/d1f1c5e40c64bf62afd7a3f9b34ce18a586a1cccbf71e783cd0a6d8e8971/lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859", size = 8171753 }, + { url = "https://files.pythonhosted.org/packages/bd/83/26b1864921869784355459f374896dcf8b44d4af3b15d7697e9156cb2de9/lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e", size = 4441955 }, + { url = 
"https://files.pythonhosted.org/packages/e0/d2/e9bff9fb359226c25cda3538f664f54f2804f4b37b0d7c944639e1a51f69/lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f", size = 5050778 }, + { url = "https://files.pythonhosted.org/packages/88/69/6972bfafa8cd3ddc8562b126dd607011e218e17be313a8b1b9cc5a0ee876/lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e", size = 4748628 }, + { url = "https://files.pythonhosted.org/packages/5d/ea/a6523c7c7f6dc755a6eed3d2f6d6646617cad4d3d6d8ce4ed71bfd2362c8/lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179", size = 5322215 }, + { url = "https://files.pythonhosted.org/packages/99/37/396fbd24a70f62b31d988e4500f2068c7f3fd399d2fd45257d13eab51a6f/lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a", size = 4813963 }, + { url = "https://files.pythonhosted.org/packages/09/91/e6136f17459a11ce1757df864b213efbeab7adcb2efa63efb1b846ab6723/lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3", size = 4923353 }, + { url = "https://files.pythonhosted.org/packages/1d/7c/2eeecf87c9a1fca4f84f991067c693e67340f2b7127fc3eca8fa29d75ee3/lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1", size = 4740541 }, + { url = "https://files.pythonhosted.org/packages/3b/ed/4c38ba58defca84f5f0d0ac2480fdcd99fc7ae4b28fc417c93640a6949ae/lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d", size = 5346504 }, + { url = "https://files.pythonhosted.org/packages/a5/22/bbd3995437e5745cb4c2b5d89088d70ab19d4feabf8a27a24cecb9745464/lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c", size = 4898077 }, + { url = "https://files.pythonhosted.org/packages/0a/6e/94537acfb5b8f18235d13186d247bca478fea5e87d224644e0fe907df976/lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99", size = 4946543 }, + { url = "https://files.pythonhosted.org/packages/8d/e8/4b15df533fe8e8d53363b23a41df9be907330e1fa28c7ca36893fad338ee/lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff", size = 4816841 }, + { url = "https://files.pythonhosted.org/packages/1a/e7/03f390ea37d1acda50bc538feb5b2bda6745b25731e4e76ab48fae7106bf/lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a", size = 5417341 }, + { url = "https://files.pythonhosted.org/packages/ea/99/d1133ab4c250da85a883c3b60249d3d3e7c64f24faff494cf0fd23f91e80/lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8", size = 5327539 }, + { url = "https://files.pythonhosted.org/packages/7d/ed/e6276c8d9668028213df01f598f385b05b55a4e1b4662ee12ef05dab35aa/lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d", size = 5012542 }, + { url = "https://files.pythonhosted.org/packages/36/88/684d4e800f5aa28df2a991a6a622783fb73cf0e46235cfa690f9776f032e/lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30", size = 3486454 }, + { url = "https://files.pythonhosted.org/packages/fc/82/ace5a5676051e60355bd8fb945df7b1ba4f4fb8447f2010fb816bfd57724/lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f", size = 3816857 }, + { url = "https://files.pythonhosted.org/packages/94/6a/42141e4d373903bfea6f8e94b2f554d05506dfda522ada5343c651410dc8/lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a", size = 8156284 }, + { url = "https://files.pythonhosted.org/packages/91/5e/fa097f0f7d8b3d113fb7312c6308af702f2667f22644441715be961f2c7e/lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd", size = 4432407 }, + { url = "https://files.pythonhosted.org/packages/2d/a1/b901988aa6d4ff937f2e5cfc114e4ec561901ff00660c3e56713642728da/lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51", size = 5048331 }, + { url = "https://files.pythonhosted.org/packages/30/0f/b2a54f48e52de578b71bbe2a2f8160672a8a5e103df3a78da53907e8c7ed/lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b", size = 4744835 }, + { url = "https://files.pythonhosted.org/packages/82/9d/b000c15538b60934589e83826ecbc437a1586488d7c13f8ee5ff1f79a9b8/lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002", size = 5316649 }, + { url = "https://files.pythonhosted.org/packages/e3/ee/ffbb9eaff5e541922611d2c56b175c45893d1c0b8b11e5a497708a6a3b3b/lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4", size = 4812046 }, + { url = "https://files.pythonhosted.org/packages/15/ff/7ff89d567485c7b943cdac316087f16b2399a8b997007ed352a1248397e5/lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492", size = 4918597 }, + { url = "https://files.pythonhosted.org/packages/c6/a3/535b6ed8c048412ff51268bdf4bf1cf052a37aa7e31d2e6518038a883b29/lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3", size = 4738071 }, + { url = "https://files.pythonhosted.org/packages/7a/8f/cbbfa59cb4d4fd677fe183725a76d8c956495d7a3c7f111ab8f5e13d2e83/lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4", size = 5342213 }, + { url = "https://files.pythonhosted.org/packages/5c/fb/db4c10dd9958d4b52e34d1d1f7c1f434422aeaf6ae2bbaaff2264351d944/lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367", size = 4893749 }, + { url = 
"https://files.pythonhosted.org/packages/f2/38/bb4581c143957c47740de18a3281a0cab7722390a77cc6e610e8ebf2d736/lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832", size = 4945901 }, + { url = "https://files.pythonhosted.org/packages/fc/d5/18b7de4960c731e98037bd48fa9f8e6e8f2558e6fbca4303d9b14d21ef3b/lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff", size = 4815447 }, + { url = "https://files.pythonhosted.org/packages/97/a8/cd51ceaad6eb849246559a8ef60ae55065a3df550fc5fcd27014361c1bab/lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd", size = 5411186 }, + { url = "https://files.pythonhosted.org/packages/89/c3/1e3dabab519481ed7b1fdcba21dcfb8832f57000733ef0e71cf6d09a5e03/lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb", size = 5324481 }, + { url = "https://files.pythonhosted.org/packages/b6/17/71e9984cf0570cd202ac0a1c9ed5c1b8889b0fc8dc736f5ef0ffb181c284/lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b", size = 5011053 }, + { url = "https://files.pythonhosted.org/packages/69/68/9f7e6d3312a91e30829368c2b3217e750adef12a6f8eb10498249f4e8d72/lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957", size = 3485634 }, + { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350 }, +] + +[[package]] +name = "monotonic" +version = "1.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ea/ca/8e91948b782ddfbd194f323e7e7d9ba12e5877addf04fb2bf8fca38e86ac/monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7", size = 7615 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c", size = 8154 }, +] + +[[package]] +name = "mypy" +version = "1.11.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/86/5d7cbc4974fd564550b80fbb8103c05501ea11aa7835edf3351d90095896/mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79", size = 3078806 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/aa/cc56fb53ebe14c64f1fe91d32d838d6f4db948b9494e200d2f61b820b85d/mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385", size = 10859630 }, + { url = "https://files.pythonhosted.org/packages/04/c8/b19a760fab491c22c51975cf74e3d253b8c8ce2be7afaa2490fbf95a8c59/mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca", size = 10037973 }, + { url = "https://files.pythonhosted.org/packages/88/57/7e7e39f2619c8f74a22efb9a4c4eff32b09d3798335625a124436d121d89/mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104", size = 12416659 }, + { url = "https://files.pythonhosted.org/packages/fc/a6/37f7544666b63a27e46c48f49caeee388bf3ce95f9c570eb5cfba5234405/mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4", size = 12897010 }, + { url = "https://files.pythonhosted.org/packages/84/8b/459a513badc4d34acb31c736a0101c22d2bd0697b969796ad93294165cfb/mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6", size = 9562873 }, + { url = "https://files.pythonhosted.org/packages/35/3a/ed7b12ecc3f6db2f664ccf85cb2e004d3e90bec928e9d7be6aa2f16b7cdf/mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318", size = 10990335 }, + { url = "https://files.pythonhosted.org/packages/04/e4/1a9051e2ef10296d206519f1df13d2cc896aea39e8683302f89bf5792a59/mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36", size = 10007119 }, + { url = "https://files.pythonhosted.org/packages/f3/3c/350a9da895f8a7e87ade0028b962be0252d152e0c2fbaafa6f0658b4d0d4/mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987", size = 12506856 }, + { url = "https://files.pythonhosted.org/packages/b6/49/ee5adf6a49ff13f4202d949544d3d08abb0ea1f3e7f2a6d5b4c10ba0360a/mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca", size = 12952066 }, + { url = "https://files.pythonhosted.org/packages/27/c0/b19d709a42b24004d720db37446a42abadf844d5c46a2c442e2a074d70d9/mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70", size = 9664000 }, + { url = 
"https://files.pythonhosted.org/packages/42/3a/bdf730640ac523229dd6578e8a581795720a9321399de494374afc437ec5/mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12", size = 2619625 }, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 }, +] + +[[package]] +name = "packaging" +version = "24.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", size = 148788 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985 }, +] + +[[package]] +name = "parameterized" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ea/49/00c0c0cc24ff4266025a53e41336b79adaa5a4ebfad214f433d623f9865e/parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1", size = 24351 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/2f/804f58f0b856ab3bf21617cccf5b39206e6c4c94c2cd227bde125ea6105f/parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b", size = 20475 }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/52/0763d1d976d5c262df53ddda8d8d4719eedf9594d046f117c25a27261a19/platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3", size = 20916 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/13/2aa1f0e1364feb2c9ef45302f387ac0bd81484e9c9a4c5688a322fbdfd08/platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", size = 18146 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "posthog" +version = "3.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backoff" }, + { name = "monotonic" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/08/182fc7954d899396a6db18692806a853d4a8c7e41b0be0602d7e03855176/posthog-3.6.3.tar.gz", hash = "sha256:6e1104a20638eab2b5d9cde6b6202a2900d67436237b3ac3521614ec17686701", size = 48216 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/42/43f93132a661390a5e80e1f73914dbd98db15a3131d2abfe69fb27c03b55/posthog-3.6.3-py2.py3-none-any.whl", hash = "sha256:cdd6c5d8919fd6158bbc4103bccc7129c712d8104dc33828be02bada7b6320a4", size = 53040 }, +] + +[[package]] +name = "pre-commit" +version = "3.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv" }, + { name = "identify" }, + { name = "nodeenv" }, + { name = "pyyaml" }, + { name = "virtualenv" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/64/10/97ee2fa54dff1e9da9badbc5e35d0bbaef0776271ea5907eccf64140f72f/pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af", size = 177815 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/92/caae8c86e94681b42c246f0bca35c059a2f0529e5b92619f6aba4cf7e7b6/pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f", size = 204643 }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.47" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/47/6d/0279b119dafc74c1220420028d490c4399b790fc1256998666e3a341879f/prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360", size = 425859 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/23/22750c4b768f09386d1c3cc4337953e8936f48a888fa6dddfb669b2c9088/prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", size = 386411 }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, +] + +[[package]] +name = "pydantic" +version = "2.8.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8c/99/d0a5dca411e0a017762258013ba9905cd6e7baa9a3fd1fe8b6529472902e/pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a", size = 739834 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/fa/b7f815b8c9ad021c07f88875b601222ef5e70619391ade4a49234d12d278/pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8", size = 423875 }, +] + +[[package]] +name = "pydantic-core" +version = "2.20.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/12/e3/0d5ad91211dba310f7ded335f4dad871172b9cc9ce204f5a56d76ccd6247/pydantic_core-2.20.1.tar.gz", hash = 
"sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4", size = 388371 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/db/f6a724db226d990a329910727cfac43539ff6969edc217286dd05cda3ef6/pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312", size = 1834507 }, + { url = "https://files.pythonhosted.org/packages/9b/83/6f2bfe75209d557ae1c3550c1252684fc1827b8b12fbed84c3b4439e135d/pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88", size = 1773527 }, + { url = "https://files.pythonhosted.org/packages/93/ef/513ea76d7ca81f2354bb9c8d7839fc1157673e652613f7e1aff17d8ce05d/pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc", size = 1787879 }, + { url = "https://files.pythonhosted.org/packages/31/0a/ac294caecf235f0cc651de6232f1642bb793af448d1cfc541b0dc1fd72b8/pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43", size = 1774694 }, + { url = "https://files.pythonhosted.org/packages/46/a4/08f12b5512f095963550a7cb49ae010e3f8f3f22b45e508c2cb4d7744fce/pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6", size = 1976369 }, + { url = "https://files.pythonhosted.org/packages/15/59/b2495be4410462aedb399071c71884042a2c6443319cbf62d00b4a7ed7a5/pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121", size = 2691250 }, + { url = "https://files.pythonhosted.org/packages/3c/ae/fc99ce1ba791c9e9d1dee04ce80eef1dae5b25b27e3fc8e19f4e3f1348bf/pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1", size = 2061462 }, + { url = "https://files.pythonhosted.org/packages/44/bb/eb07cbe47cfd638603ce3cb8c220f1a054b821e666509e535f27ba07ca5f/pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b", size = 1893923 }, + { url = "https://files.pythonhosted.org/packages/ce/ef/5a52400553b8faa0e7f11fd7a2ba11e8d2feb50b540f9e7973c49b97eac0/pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27", size = 1966779 }, + { url = "https://files.pythonhosted.org/packages/4c/5b/fb37fe341344d9651f5c5f579639cd97d50a457dc53901aa8f7e9f28beb9/pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b", size = 2109044 }, + { url = "https://files.pythonhosted.org/packages/70/1a/6f7278802dbc66716661618807ab0dfa4fc32b09d1235923bbbe8b3a5757/pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a", size = 1708265 }, + { url = "https://files.pythonhosted.org/packages/35/7f/58758c42c61b0bdd585158586fecea295523d49933cb33664ea888162daf/pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2", size = 1901750 }, + { url = 
"https://files.pythonhosted.org/packages/6f/47/ef0d60ae23c41aced42921728650460dc831a0adf604bfa66b76028cb4d0/pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231", size = 1839225 }, + { url = "https://files.pythonhosted.org/packages/6a/23/430f2878c9cd977a61bb39f71751d9310ec55cee36b3d5bf1752c6341fd0/pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9", size = 1768604 }, + { url = "https://files.pythonhosted.org/packages/9e/2b/ec4e7225dee79e0dc80ccc3c35ab33cc2c4bbb8a1a7ecf060e5e453651ec/pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f", size = 1789767 }, + { url = "https://files.pythonhosted.org/packages/64/b0/38b24a1fa6d2f96af3148362e10737ec073768cd44d3ec21dca3be40a519/pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52", size = 1772061 }, + { url = "https://files.pythonhosted.org/packages/5e/da/bb73274c42cb60decfa61e9eb0c9029da78b3b9af0a9de0309dbc8ff87b6/pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237", size = 1974573 }, + { url = "https://files.pythonhosted.org/packages/c8/65/41693110fb3552556180460daffdb8bbeefb87fc026fd9aa4b849374015c/pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe", size = 2625596 }, + { url = "https://files.pythonhosted.org/packages/09/b3/a5a54b47cccd1ab661ed5775235c5e06924753c2d4817737c5667bfa19a8/pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e", size = 2099064 }, + { url = "https://files.pythonhosted.org/packages/52/fa/443a7a6ea54beaba45ff3a59f3d3e6e3004b7460bcfb0be77bcf98719d3b/pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24", size = 1900345 }, + { url = "https://files.pythonhosted.org/packages/8e/e6/9aca9ffae60f9cdf0183069de3e271889b628d0fb175913fcb3db5618fb1/pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1", size = 1968252 }, + { url = "https://files.pythonhosted.org/packages/46/5e/6c716810ea20a6419188992973a73c2fb4eb99cd382368d0637ddb6d3c99/pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd", size = 2119191 }, + { url = "https://files.pythonhosted.org/packages/06/fc/6123b00a9240fbb9ae0babad7a005d51103d9a5d39c957a986f5cdd0c271/pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688", size = 1717788 }, + { url = "https://files.pythonhosted.org/packages/d5/36/e61ad5a46607a469e2786f398cd671ebafcd9fb17f09a2359985c7228df5/pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d", size = 1898188 }, + { url = 
"https://files.pythonhosted.org/packages/49/75/40b0e98b658fdba02a693b3bacb4c875a28bba87796c7b13975976597d8c/pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686", size = 1838688 }, + { url = "https://files.pythonhosted.org/packages/75/02/d8ba2d4a266591a6a623c68b331b96523d4b62ab82a951794e3ed8907390/pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a", size = 1768409 }, + { url = "https://files.pythonhosted.org/packages/91/ae/25ecd9bc4ce4993e99a1a3c9ab111c082630c914260e129572fafed4ecc2/pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b", size = 1789317 }, + { url = "https://files.pythonhosted.org/packages/7a/80/72057580681cdbe55699c367963d9c661b569a1d39338b4f6239faf36cdc/pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19", size = 1771949 }, + { url = "https://files.pythonhosted.org/packages/a2/be/d9bbabc55b05019013180f141fcaf3b14dbe15ca7da550e95b60c321009a/pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac", size = 1974392 }, + { url = "https://files.pythonhosted.org/packages/79/2d/7bcd938c6afb0f40293283f5f09988b61fb0a4f1d180abe7c23a2f665f8e/pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703", size = 2625565 }, + { url = "https://files.pythonhosted.org/packages/ac/88/ca758e979457096008a4b16a064509028e3e092a1e85a5ed6c18ced8da88/pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c", size = 2098784 }, + { url = "https://files.pythonhosted.org/packages/eb/de/2fad6d63c3c42e472e985acb12ec45b7f56e42e6f4cd6dfbc5e87ee8678c/pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83", size = 1900198 }, + { url = "https://files.pythonhosted.org/packages/fe/50/077c7f35b6488dc369a6d22993af3a37901e198630f38ac43391ca730f5b/pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203", size = 1968005 }, + { url = "https://files.pythonhosted.org/packages/5d/1f/f378631574ead46d636b9a04a80ff878b9365d4b361b1905ef1667d4182a/pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0", size = 2118920 }, + { url = "https://files.pythonhosted.org/packages/7a/ea/e4943f17df7a3031d709481fe4363d4624ae875a6409aec34c28c9e6cf59/pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e", size = 1717397 }, + { url = "https://files.pythonhosted.org/packages/13/63/b95781763e8d84207025071c0cec16d921c0163c7a9033ae4b9a0e020dc7/pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20", size = 1898013 }, +] + +[[package]] +name = "pygments" +version = "2.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", size = 4891905 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", size = 1205513 }, +] + +[[package]] +name = "pylint" +version = "3.2.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cf/e8/d59ce8e54884c9475ed6510685ef4311a10001674c28703b23da30f3b24d/pylint-3.2.7.tar.gz", hash = "sha256:1b7a721b575eaeaa7d39db076b6e7743c993ea44f57979127c517c6c572c803e", size = 1511922 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/4d/c73bc0fca447b918611985c325cd7017fb762050eb9c6ac6fa7d9ac6fbe4/pylint-3.2.7-py3-none-any.whl", hash = "sha256:02f4aedeac91be69fb3b4bea997ce580a4ac68ce58b89eaefeaf06749df73f4b", size = 519906 }, +] + +[[package]] +name = "pylint-plugin-utils" +version = "0.8.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pylint" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/d2/3b9728910bc69232ec38d8fb7053c03c887bfe7e6e170649b683dd351750/pylint_plugin_utils-0.8.2.tar.gz", hash = "sha256:d3cebf68a38ba3fba23a873809155562571386d4c1b03e5b4c4cc26c3eee93e4", size = 10674 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/ee/49d11aee31061bcc1d2726bd8334a2883ddcdbde7d7744ed6b3bd11704ed/pylint_plugin_utils-0.8.2-py3-none-any.whl", hash = "sha256:ae11664737aa2effbf26f973a9e0b6779ab7106ec0adc5fe104b0907ca04e507", size = 11171 }, +] + +[[package]] +name = "pylint-pydantic" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "pylint" }, + { name = "pylint-plugin-utils" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/80/34b429c6534be99ef3d6d20bd794b26fda0682d38e2d57f85df258beaac2/pylint_pydantic-0.3.2-py3-none-any.whl", hash = "sha256:e5cec02370aa68ac8eff138e5d573b0ac049bab864e9a6c3a9057cf043440aa1", size = 15951 }, +] + +[[package]] +name = "pyproject-hooks" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/07/6f63dda440d4abb191b91dc383b472dae3dd9f37e4c1e4a5c3db150531c6/pyproject_hooks-1.1.0.tar.gz", hash = "sha256:4b37730834edbd6bd37f26ece6b44802fb1c1ee2ece0e54ddff8bfc06db86965", size = 7838 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/f3/431b9d5fe7d14af7a32340792ef43b8a714e7726f1d7b69cc4e8e7a3f1d7/pyproject_hooks-1.1.0-py3-none-any.whl", hash = "sha256:7ceeefe9aec63a1064c18d939bdc3adf2d8aa1988a510afec15151578b232aa2", size = 9184 }, +] + +[[package]] +name = "pytest" +version = "8.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b4/8c/9862305bdcd6020bc7b45b1b5e7397a6caf1a33d3025b9a003b39075ffb2/pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce", size = 1439314 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/f9/cf155cf32ca7d6fa3601bc4c5dd19086af4b320b706919d48a4c79081cf9/pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5", size = 341802 }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.24.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/6d/c6cf50ce320cf8611df7a1254d86233b3df7cc07f9b5f5cbcb82e08aa534/pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276", size = 49855 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/31/6607dab48616902f76885dfcf62c08d929796fc3b2d2318faf9fd54dbed9/pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b", size = 18024 }, +] + +[[package]] +name = "pytest-subtests" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/fe/e691d2f4ce061a475f488cad1ef58431556affea323dde5c764fd7515a70/pytest_subtests-0.13.1.tar.gz", hash = "sha256:989e38f0f1c01bc7c6b2e04db7d9fd859db35d77c2c1a430c831a70cbf3fde2d", size = 15936 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/ac/fc132cb88e8f2042cebcb6ef0ffac40017c514fbadf3931e0b4bcb4bdfb6/pytest_subtests-0.13.1-py3-none-any.whl", hash = "sha256:ab616a22f64cd17c1aee65f18af94dbc30c444f8683de2b30895c3778265e3bd", size = 8038 }, +] + +[[package]] +name = "pytest-sugar" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "pytest" }, + { name = "termcolor" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/ac/5754f5edd6d508bc6493bc37d74b928f102a5fff82d9a80347e180998f08/pytest-sugar-1.0.0.tar.gz", hash = "sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a", size = 14992 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/fb/889f1b69da2f13691de09a111c16c4766a433382d44aa0ecf221deded44a/pytest_sugar-1.0.0-py3-none-any.whl", hash = "sha256:70ebcd8fc5795dc457ff8b69d266a4e2e8a74ae0c3edc749381c64b5246c8dfd", size = 10171 }, +] + +[[package]] +name = "pytest-timeout" +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/93/0d/04719abc7a4bdb3a7a1f968f24b0f5253d698c9cc94975330e9d3145befb/pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9", size = 17697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/27/14af9ef8321f5edc7527e47def2a21d8118c6f329a9342cc61387a0c0599/pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e", size = 14148 }, +] + +[[package]] +name = "pytest-timer" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b1/f7/bc223798d5cccf2f03ed224a6fdceae005d06ae33fbd302741da9eb9e95a/pytest-timer-1.0.0.tar.gz", hash = "sha256:c65a36970e4425c0fd43bdf63b60359b353382632b08418c7c25918ec18a3829", size = 5812 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/48/486b6ff8b910852affeab5b628910f8faa861450a37c21c19902b798ebda/pytest_timer-1.0.0-py3-none-any.whl", hash = "sha256:60f16a6d98dd5e8ce3e57ece829592f6e081e18be9f1b20e5bc93e5e7196b065", size = 5331 }, +] + +[package.optional-dependencies] +colorama = [ + { name = "colorama" }, +] + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/c4/3c310a19bc1f1e9ef50075582652673ef2bfc8cd62afef9585683821902f/pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d", size = 84060 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/82/1d96bf03ee4c0fdc3c0cbe61470070e659ca78dc0086fb88b66c185e2449/pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7", size = 46108 }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, + { url = 
"https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = 
"https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "readchar" +version = "4.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/31/2934981710c63afa9c58947d2e676093ce4bb6c7ce60aac2fcc4be7d98d0/readchar-4.2.0.tar.gz", hash = "sha256:44807cbbe377b72079fea6cba8aa91c809982d7d727b2f0dbb2d1a8084914faa", size = 9691 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/6f/ca076ad4d18b3d33c31c304fb7e68dd9ce2bfdb49fb8874611ad7c55e969/readchar-4.2.0-py3-none-any.whl", hash = "sha256:2a587a27c981e6d25a518730ad4c88c429c315439baa6fda55d7a8b3ac4cb62a", size = 9349 }, +] + +[[package]] +name = "refurb" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/a9/83/56ecbe3af6462e7a87cc4a302c2889e7ce447e9502ea76b7a739d1d46123/refurb-2.0.0.tar.gz", hash = "sha256:8a8f1e7c131ef7dc460cbecbeaf536f5eb0ecb657c099d7823941f0e65b1cfe1", size = 91453 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/3e/f0a47001b29205d96c9f2bf7f7383fdeadd7c35488e6dadd9afa3b6283e8/refurb-2.0.0-py3-none-any.whl", hash = "sha256:fa9e950dc6edd7473642569c118f8714eefd1e6f21a15ee4210a1be853aaaf80", size = 138560 }, +] + +[[package]] +name = "regex" +version = "2024.7.24" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/51/64256d0dc72816a4fe3779449627c69ec8fee5a5625fd60ba048f53b3478/regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506", size = 393485 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/ec/261f8434a47685d61e59a4ef3d9ce7902af521219f3ebd2194c7adb171a6/regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281", size = 470810 }, + { url = "https://files.pythonhosted.org/packages/f0/47/f33b1cac88841f95fff862476a9e875d9a10dae6912a675c6f13c128e5d9/regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b", size = 282126 }, + { url = "https://files.pythonhosted.org/packages/fc/1b/256ca4e2d5041c0aa2f1dc222f04412b796346ab9ce2aa5147405a9457b4/regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a", size = 278920 }, + { url = "https://files.pythonhosted.org/packages/91/03/4603ec057c0bafd2f6f50b0bdda4b12a0ff81022decf1de007b485c356a6/regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73", size = 785420 }, + { url = "https://files.pythonhosted.org/packages/75/f8/13b111fab93e6273e26de2926345e5ecf6ddad1e44c4d419d7b0924f9c52/regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2", size = 828164 }, + { url = "https://files.pythonhosted.org/packages/4a/80/bc3b9d31bd47ff578758af929af0ac1d6169b247e26fa6e87764007f3d93/regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e", size = 812621 }, + { url = "https://files.pythonhosted.org/packages/8b/77/92d4a14530900d46dddc57b728eea65d723cc9fcfd07b96c2c141dabba84/regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51", size = 786609 }, + { url = "https://files.pythonhosted.org/packages/35/58/06695fd8afad4c8ed0a53ec5e222156398b9fe5afd58887ab94ea68e4d16/regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364", size = 775290 }, + { url = "https://files.pythonhosted.org/packages/1b/0f/50b97ee1fc6965744b9e943b5c0f3740792ab54792df73d984510964ef29/regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee", size = 772849 }, + { url = 
"https://files.pythonhosted.org/packages/8f/64/565ff6cf241586ab7ae76bb4138c4d29bc1d1780973b457c2db30b21809a/regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c", size = 778428 }, + { url = "https://files.pythonhosted.org/packages/e5/fe/4ceabf4382e44e1e096ac46fd5e3bca490738b24157116a48270fd542e88/regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce", size = 849436 }, + { url = "https://files.pythonhosted.org/packages/68/23/1868e40d6b594843fd1a3498ffe75d58674edfc90d95e18dd87865b93bf2/regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1", size = 849484 }, + { url = "https://files.pythonhosted.org/packages/f3/52/bff76de2f6e2bc05edce3abeb7e98e6309aa022fc06071100a0216fbeb50/regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e", size = 776712 }, + { url = "https://files.pythonhosted.org/packages/f2/72/70ade7b0b5fe5c6df38fdfa2a5a8273e3ea6a10b772aa671b7e889e78bae/regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c", size = 257716 }, + { url = "https://files.pythonhosted.org/packages/04/4d/80e04f4e27ab0cbc9096e2d10696da6d9c26a39b60db52670fd57614fea5/regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52", size = 269662 }, + { url = "https://files.pythonhosted.org/packages/0f/26/f505782f386ac0399a9237571833f187414882ab6902e2e71a1ecb506835/regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86", size = 471748 }, + { url = "https://files.pythonhosted.org/packages/bb/1d/ea9a21beeb433dbfca31ab82867d69cb67ff8674af9fab6ebd55fa9d3387/regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad", size = 282841 }, + { url = "https://files.pythonhosted.org/packages/9b/f2/c6182095baf0a10169c34e87133a8e73b2e816a80035669b1278e927685e/regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9", size = 279114 }, + { url = "https://files.pythonhosted.org/packages/72/58/b5161bf890b6ca575a25685f19a4a3e3b6f4a072238814f8658123177d84/regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289", size = 789749 }, + { url = "https://files.pythonhosted.org/packages/09/fb/5381b19b62f3a3494266be462f6a015a869cf4bfd8e14d6e7db67e2c8069/regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9", size = 831666 }, + { url = "https://files.pythonhosted.org/packages/3d/6d/2a21c85f970f9be79357d12cf4b97f4fc6bf3bf6b843c39dabbc4e5f1181/regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c", size = 817544 }, + { url = "https://files.pythonhosted.org/packages/f9/ae/5f23e64f6cf170614237c654f3501a912dfb8549143d4b91d1cd13dba319/regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440", size = 790854 }, + { url = "https://files.pythonhosted.org/packages/29/0a/d04baad1bbc49cdfb4aef90c4fc875a60aaf96d35a1616f1dfe8149716bc/regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610", size = 779242 }, + { url = "https://files.pythonhosted.org/packages/3a/27/b242a962f650c3213da4596d70e24c7c1c46e3aa0f79f2a81164291085f8/regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5", size = 776932 }, + { url = "https://files.pythonhosted.org/packages/9c/ae/de659bdfff80ad2c0b577a43dd89dbc43870a4fc4bbf604e452196758e83/regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799", size = 784521 }, + { url = "https://files.pythonhosted.org/packages/d4/ac/eb6a796da0bdefbf09644a7868309423b18d344cf49963a9d36c13502d46/regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05", size = 854548 }, + { url = "https://files.pythonhosted.org/packages/56/77/fde8d825dec69e70256e0925af6c81eea9acf0a634d3d80f619d8dcd6888/regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94", size = 853345 }, + { url = "https://files.pythonhosted.org/packages/ff/04/2b79ad0bb9bc05ab4386caa2c19aa047a66afcbdfc2640618ffc729841e4/regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38", size = 781414 }, + { url = "https://files.pythonhosted.org/packages/bf/71/d0af58199283ada7d25b20e416f5b155f50aad99b0e791c0966ff5a1cd00/regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc", size = 258125 }, + { url = "https://files.pythonhosted.org/packages/95/b3/10e875c45c60b010b66fc109b899c6fc4f05d485fe1d54abff98ce791124/regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908", size = 269162 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "returns" +version = "0.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/b4/411f1e4026344f0ccabc5654aee02280ebe8a9783756e53c76437693dc9a/returns-0.23.0.tar.gz", hash = "sha256:27594c28e5fc338e052d27ddf77fe1da82db4472f6d59901e7e9165be35a5256", size = 242563 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/9c/cc/ff7046c009c17136f83764f8ff225309b58d364c2ffd5626b5a338bd865a/returns-0.23.0-py3-none-any.whl", hash = "sha256:278aa6168072b24574ad14be32f7123d1b835928473dd40bc506f47c8b25859a", size = 155284 }, +] + +[[package]] +name = "runs" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "xmod" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/6d/b9aace390f62db5d7d2c77eafce3d42774f27f1829d24fa9b6f598b3ef71/runs-1.2.2.tar.gz", hash = "sha256:9dc1815e2895cfb3a48317b173b9f1eac9ba5549b36a847b5cc60c3bf82ecef1", size = 5474 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/d6/17caf2e4af1dec288477a0cbbe4a96fbc9b8a28457dce3f1f452630ce216/runs-1.2.2-py3-none-any.whl", hash = "sha256:0980dcbc25aba1505f307ac4f0e9e92cbd0be2a15a1e983ee86c24c87b839dfd", size = 7033 }, +] + +[[package]] +name = "sentry-sdk" +version = "2.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/41/97f673384dae5ed81cc2a568cc5c28e76deee85f8ba50def862e86150a5a/sentry_sdk-2.13.0.tar.gz", hash = "sha256:8d4a576f7a98eb2fdb40e13106e41f330e5c79d72a68be1316e7852cf4995260", size = 279937 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ad/7e/e9ca09f24a6c334286631a2d32c267cdc5edad5ac03fd9d20a01a82f1c35/sentry_sdk-2.13.0-py2.py3-none-any.whl", hash = "sha256:6beede8fc2ab4043da7f69d95534e320944690680dd9a963178a49de71d726c6", size = 309078 }, +] + +[[package]] +name = "six" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", size = 34041 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254", size = 11053 }, +] + +[[package]] +name = "smmap" +version = "5.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/88/04/b5bf6d21dc4041000ccba7eb17dd3055feb237e7ffc2c20d3fae3af62baa/smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62", size = 22291 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/a5/10f97f73544edcdef54409f1d839f6049a0d79df68adbc1ceb24d1aaca42/smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da", size = 24282 }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.32" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "(python_full_version < '3.13' and platform_machine == 'AMD64') or (python_full_version < '3.13' and platform_machine == 'WIN32') or (python_full_version < '3.13' and platform_machine == 'aarch64') or (python_full_version < '3.13' and platform_machine == 'amd64') or (python_full_version < '3.13' and platform_machine == 'ppc64le') or (python_full_version < '3.13' and platform_machine == 'win32') or (python_full_version < '3.13' and platform_machine == 'x86_64')" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/af/6f/967e987683908af816aa3072c1a6997ac9933cf38d66b0474fb03f253323/SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8", size = 9546691 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/a9/e3bd92004095ed6796ea4ac5fdd9606b1e53117ef5b90ae79ac3fc6e225e/SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f", size = 2088752 }, + { url = "https://files.pythonhosted.org/packages/a9/34/b97f4458eefbdead7ee5ce69cbf3591574c5ba44162dbe52c4386818623f/SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5", size = 2079150 }, + { url = "https://files.pythonhosted.org/packages/6b/b5/95ff12f5d4eb7813dd5a59ccc8e3c68d4683fedf59801b40704593c3b757/SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d", size = 3197551 }, + { url = "https://files.pythonhosted.org/packages/ca/af/379f8695ab751acf61868b0098c8d66e2b2ad8b11d9939d5144c82d05bc5/SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0", size = 3197551 }, + { url = "https://files.pythonhosted.org/packages/ff/0c/5feaea51f23b5f008f16f9dbf7eec18ee5b9b8eb2875d6e367f52daf633e/SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2", size = 3134583 }, + { url = "https://files.pythonhosted.org/packages/cc/83/4eca3604f9049a2b92a9ffb818ea1cc8186f722e539a6feee58f931bad34/SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961", size = 3154911 }, + { url = "https://files.pythonhosted.org/packages/3d/56/485ad322f148a8b70060e03b5f130e714f95d839b5e50315e5c5efd1fc05/SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28", size = 2059047 }, + { url = "https://files.pythonhosted.org/packages/bb/8c/4548ae42b4ab7f3fe9f1aeb4b1f28ea795485ca44840cb0f3f57aa8ecfcc/SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924", size = 2084480 }, + { url = "https://files.pythonhosted.org/packages/06/95/88beb07aa61c611829c9ce950f349adcf00065c1bb313090c20d80a520ca/SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92", size = 2087267 }, + { url = "https://files.pythonhosted.org/packages/11/93/0b28f9d261af927eef3df472e5bbf144fb33e062de770b2c312bb516702b/SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9", size = 2077732 }, + { url = "https://files.pythonhosted.org/packages/84/50/1ce1dec4b1cce8f1163c2c58bb1588ac5076c3dbc4bb1d3eab70e798fdd4/SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8", size = 3227230 }, + { url = "https://files.pythonhosted.org/packages/9d/b8/aa822988d390cf06afa3c69d86a3a38bba79b51385207cd7cd99d0be17bb/SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec", size = 3238118 }, + { url = "https://files.pythonhosted.org/packages/c3/d7/7a65172ed2713acf0262a65392dfcf05ca2b7a67c988ebad425eba9b3843/SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c", size = 3173610 }, + { url = "https://files.pythonhosted.org/packages/a9/0f/8da0613e3f0b095ef423802943ed4b98242370736034ed5043a43c46c3d4/SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb", size = 3200224 }, + { url = "https://files.pythonhosted.org/packages/50/ef/973e0bbf2be5c12e34dca92139ca100f51ba078e36c3c06fd1dc8480c209/SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d", size = 2057626 }, + { url = "https://files.pythonhosted.org/packages/db/5f/440c324aae82a2ce892ac0fe1d114b9dc9f04e934e8f0762574876a168b5/SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb", size = 2083167 }, + { url = "https://files.pythonhosted.org/packages/99/1b/045185a9f6481d926a451aafaa0d07c98f19ac7abe730dff9630c9ead4fa/SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202", size = 1878765 }, +] + +[package.optional-dependencies] +aiosqlite = [ + { name = "aiosqlite" }, + { name = "greenlet" }, + { name = "typing-extensions" }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, +] + +[[package]] +name = "termcolor" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/10/56/d7d66a84f96d804155f6ff2873d065368b25a07222a6fd51c4f24ef6d764/termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a", size = 12664 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/5f/8c716e47b3a50cbd7c146f45881e11d9414def768b7cd9c5e6650ec2a80a/termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63", size = 7719 }, +] + +[[package]] +name = "tiktoken" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/4a/abaec53e93e3ef37224a4dd9e2fc6bb871e7a538c2b6b9d2a6397271daf4/tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6", size = 33437 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/eb/57492b2568eea1d546da5cc1ae7559d924275280db80ba07e6f9b89a914b/tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f", size = 961468 }, + { url = "https://files.pythonhosted.org/packages/30/ef/e07dbfcb2f85c84abaa1b035a9279575a8da0236305491dc22ae099327f7/tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f", size = 907005 }, + { url = "https://files.pythonhosted.org/packages/ea/9b/f36db825b1e9904c3a2646439cb9923fc1e09208e2e071c6d9dd64ead131/tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b", size = 1049183 }, + { url = "https://files.pythonhosted.org/packages/61/b4/b80d1fe33015e782074e96bbbf4108ccd283b8deea86fb43c15d18b7c351/tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992", size = 1080830 }, + { url = "https://files.pythonhosted.org/packages/2a/40/c66ff3a21af6d62a7e0ff428d12002c4e0389f776d3ff96dcaa0bb354eee/tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1", size = 1092967 }, + { url = "https://files.pythonhosted.org/packages/2e/80/f4c9e255ff236e6a69ce44b927629cefc1b63d3a00e2d1c9ed540c9492d2/tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89", size = 1142682 }, + { url = "https://files.pythonhosted.org/packages/b1/10/c04b4ff592a5f46b28ebf4c2353f735c02ae7f0ce1b165d00748ced6467e/tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb", size = 799009 }, + { url = "https://files.pythonhosted.org/packages/1d/46/4cdda4186ce900608f522da34acf442363346688c71b938a90a52d7b84cc/tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908", size = 960446 }, + { url = "https://files.pythonhosted.org/packages/b6/30/09ced367d280072d7a3e21f34263dfbbf6378661e7a0f6414e7c18971083/tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410", size = 906652 }, + { url = "https://files.pythonhosted.org/packages/e6/7b/c949e4954441a879a67626963dff69096e3c774758b9f2bb0853f7b4e1e7/tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704", size = 1047904 }, + { url = "https://files.pythonhosted.org/packages/50/81/1842a22f15586072280364c2ab1e40835adaf64e42fe80e52aff921ee021/tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350", size = 1079836 }, + { url = "https://files.pythonhosted.org/packages/6d/87/51a133a3d5307cf7ae3754249b0faaa91d3414b85c3d36f80b54d6817aa6/tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4", size = 1092472 }, + { url = "https://files.pythonhosted.org/packages/a5/1f/c93517dc6d3b2c9e988b8e24f87a8b2d4a4ab28920a3a3f3ea338397ae0c/tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97", size = 1141881 }, + { url = 
"https://files.pythonhosted.org/packages/bf/4b/48ca098cb580c099b5058bf62c4cb5e90ca6130fa43ef4df27088536245b/tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f", size = 799281 }, +] + +[[package]] +name = "timeout-decorator" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/80/f8/0802dd14c58b5d3d72bb9caa4315535f58787a1dc50b81bbbcaaa15451be/timeout-decorator-0.5.0.tar.gz", hash = "sha256:6a2f2f58db1c5b24a2cc79de6345760377ad8bdc13813f5265f6c3e63d16b3d7", size = 4754 } + +[[package]] +name = "tomlkit" +version = "0.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b1/09/a439bec5888f00a54b8b9f05fa94d7f901d6735ef4e55dcec9bc37b5d8fa/tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79", size = 192885 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/b6/a447b5e4ec71e13871be01ba81f5dfc9d0af7e473da256ff46bc0e24026f/tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde", size = 37955 }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, +] + +[[package]] +name = "typeguard" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/e1/3178b3e5369a98239ed7301e3946747048c66f4023163d55918f11b82d4e/typeguard-4.3.0.tar.gz", hash = "sha256:92ee6a0aec9135181eae6067ebd617fd9de8d75d714fb548728a4933b1dea651", size = 73374 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/de/be0ba39ee73760bf33329b7c6f95bc67e96593c69c881671e312538e24bb/typeguard-4.3.0-py3-none-any.whl", hash = "sha256:4d24c5b39a117f8a895b9da7a9b3114f04eb63bade45a4492de49b175b6f7dfa", size = 35385 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "unidiff" +version = "0.7.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a3/48/81be0ac96e423a877754153699731ef439fd7b80b4c8b5425c94ed079ebd/unidiff-0.7.5.tar.gz", hash = "sha256:2e5f0162052248946b9f0970a40e9e124236bf86c82b70821143a6fc1dea2574", size = 20931 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8a/54/57c411a6e8f7bd7848c8b66e4dcaffa586bf4c02e63f2280db0327a4e6eb/unidiff-0.7.5-py2.py3-none-any.whl", hash = "sha256:c93bf2265cc1ba2a520e415ab05da587370bc2a3ae9e0414329f54f0c2fc09e8", size = 14386 }, +] + +[[package]] +name = "unittest-xml-reporting" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lxml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/40/3bf1afc96e93c7322520981ac4593cbb29daa21b48d32746f05ab5563dca/unittest-xml-reporting-3.2.0.tar.gz", hash = "sha256:edd8d3170b40c3a81b8cf910f46c6a304ae2847ec01036d02e9c0f9b85762d28", size = 18002 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/88/f6e9b87428584a3c62cac768185c438ca6d561367a5d267b293259d76075/unittest_xml_reporting-3.2.0-py2.py3-none-any.whl", hash = "sha256:f3d7402e5b3ac72a5ee3149278339db1a8f932ee405f48bcb9c681372f2717d5", size = 20936 }, +] + +[[package]] +name = "urllib3" +version = "2.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/6d/fa469ae21497ddc8bc93e5877702dca7cb8f911e337aca7452b5724f1bb6/urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168", size = 292266 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/1c/89ffc63a9605b583d5df2be791a27bc1a42b7c32bab68d3c8f2f73a98cd4/urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", size = 121444 }, +] + +[[package]] +name = "virtualenv" +version = "20.26.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/60/db9f95e6ad456f1872486769c55628c7901fb4de5a72c2f7bdd912abf0c1/virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a", size = 9057588 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/4d/410156100224c5e2f0011d435e477b57aed9576fc7fe137abcf14ec16e11/virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589", size = 5684792 }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +] + +[[package]] +name = "xmod" +version = "1.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/b2/e3edc608823348e628a919e1d7129e641997afadd946febdd704aecc5881/xmod-1.8.1.tar.gz", hash = "sha256:38c76486b9d672c546d57d8035df0beb7f4a9b088bc3fb2de5431ae821444377", size = 3988 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/6b/0dc75b64a764ea1cb8e4c32d1fb273c147304d4e5483cd58be482dc62e45/xmod-1.8.1-py3-none-any.whl", hash = "sha256:a24e9458a4853489042522bdca9e50ee2eac5ab75c809a91150a8a7f40670d48", size = 4610 }, +] From 
86e3d56ed655cc4b4b23a9a09957a76db07956f3 Mon Sep 17 00:00:00 2001 From: James Braza Date: Tue, 3 Sep 2024 13:21:23 -0700 Subject: [PATCH 2/5] Added dependencies to pyproject.toml --- .pre-commit-config.yaml | 13 + pyproject.toml | 52 +- uv.lock | 1521 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 1551 insertions(+), 35 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c0b97dcc..ec5b3439 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,3 +65,16 @@ repos: rev: v1.11.2 hooks: - id: mypy + additional_dependencies: + - fastapi>=0.109 # Match pyproject.toml + - fhaviary + - httpx + - litellm>=1.40.9,<=1.40.12 # Match pyproject.toml + - numpy + - pydantic~=2.0 # Match pyproject.toml + - tenacity + - torch + - types-aiofiles + - types-tqdm + - usearch>=2.13 # Match pyproject.toml + - wandb diff --git a/pyproject.toml b/pyproject.toml index c67f101a..602391bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,21 @@ classifiers = [ "Programming Language :: Python", ] dependencies = [ + "aiofiles", + "dm-tree", + "fhaviary", + "httpx", + "litellm", + "networkx[default]~=3.0", # Pin just to keep recent + "numpy", + "openai>=1", + "pydantic~=2.0", + "tenacity", + "tiktoken", + "torch", + "tqdm", + "transformers", + "usearch>=2.13", # For py.typed ] description = "Agent framework for constructing language model agents and training on constructive tasks." dynamic = ["version"] @@ -22,6 +37,21 @@ name = "ldp" readme = "README.md" requires-python = ">=3.11" +[project.optional-dependencies] +monitor = [ + "wandb", +] +server = [ + "fastapi>=0.109", # For Python 3.12 support +] +typing = [ + "types-aiofiles", + "types-tqdm", +] +visualization = [ + "pydot~=2.0", # Pin to match version pinned by networkx "extra" +] + [project.urls] issues = "https://github.com/Future-House/ldp/issues" repository = "https://github.com/Future-House/ldp" @@ -98,6 +128,18 @@ warn_unused_configs = true # Warns about unneeded `# type: ignore` comments. warn_unused_ignores = true +[[tool.mypy.overrides]] +# Suppresses error messages about imports that cannot be resolved. 
+ignore_missing_imports = true +# Per-module configuration options +module = [ + "litellm", # SEE: https://github.com/BerriAI/litellm/issues/825 + "networkx", # SEE: https://github.com/networkx/networkx/issues/3988 + "pydot", + "transformers.*", # SEE: https://github.com/huggingface/transformers/pull/18485 + "tree", # SEE: https://github.com/google-deepmind/tree/issues/84 +] + [tool.pylint] [tool.pylint.design] @@ -351,20 +393,22 @@ trailing_comma_inline_array = true [tool.uv] dev-dependencies = [ - "SQLAlchemy[aiosqlite]~=2.0", # Match aviary dependencies "build", # TODO: remove after https://github.com/astral-sh/uv/issues/6278 "codeflash", + "fhaviary[xml]", "ipython>=8", # Pin to keep recent + "ldp[monitor,server,typing,visualization]", + "litellm>=1.40.9,<=1.40.12", # Pin lower for get_supported_openai_params not requiring custom LLM, upper for https://github.com/BerriAI/litellm/issues/4032 "mypy>=1.8", # Pin for mutable-override - "pre-commit~=3.4", + "pre-commit~=3.4", # Pin to keep recent "pylint-pydantic", - "pylint>=3.2", + "pylint>=3.2", # Pin to keep recent "pytest-asyncio", + "pytest-rerunfailures", "pytest-subtests", "pytest-sugar", "pytest-timer[colorama]", "pytest-xdist", "pytest>=8", # Pin to keep recent "refurb>=2", # Pin to keep recent - "typeguard", ] diff --git a/uv.lock b/uv.lock index 82e43446..3fa615f6 100644 --- a/uv.lock +++ b/uv.lock @@ -1,9 +1,103 @@ version = 1 requires-python = ">=3.11" resolution-markers = [ - "python_full_version < '3.12'", - "python_full_version < '3.13'", - "python_full_version >= '3.13'", + "python_full_version < '3.12' and sys_platform == 'linux'", + "python_full_version == '3.12.*' and sys_platform == 'linux'", + "python_full_version < '3.12' and sys_platform != 'linux'", + "python_full_version == '3.12.*' and sys_platform != 'linux'", + "python_full_version >= '3.13' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'linux'", +] + +[[package]] +name = "aiofiles" +version = "24.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/03/a88171e277e8caa88a4c77808c20ebb04ba74cc4681bf1e9416c862de237/aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c", size = 30247 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896 }, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/f7/22bba300a16fd1cad99da1a23793fe43963ee326d012fdf852d0b4035955/aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2", size = 16786 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/b6/58ea188899950d759a837f9a58b2aee1d1a380ea4d6211ce9b1823748851/aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd", size = 12155 }, +] + +[[package]] +name = "aiohttp" +version = "3.10.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "yarl" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ca/28/ca549838018140b92a19001a8628578b0f2a3b38c16826212cc6f706e6d4/aiohttp-3.10.5.tar.gz", hash = "sha256:f071854b47d39591ce9a17981c46790acb30518e2f83dfca8db2dfa091178691", size = 7524360 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/90/54ccb1e4eadfb6c95deff695582453f6208584431d69bf572782e9ae542b/aiohttp-3.10.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c6a4e5e40156d72a40241a25cc226051c0a8d816610097a8e8f517aeacd59a2", size = 586455 }, + { url = "https://files.pythonhosted.org/packages/c3/7a/95e88c02756e7e718f054e1bb3ec6ad5d0ee4a2ca2bb1768c5844b3de30a/aiohttp-3.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c634a3207a5445be65536d38c13791904fda0748b9eabf908d3fe86a52941cf", size = 397255 }, + { url = "https://files.pythonhosted.org/packages/07/4f/767387b39990e1ee9aba8ce642abcc286d84d06e068dc167dab983898f18/aiohttp-3.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4aff049b5e629ef9b3e9e617fa6e2dfeda1bf87e01bcfecaf3949af9e210105e", size = 388973 }, + { url = "https://files.pythonhosted.org/packages/61/46/0df41170a4d228c07b661b1ba9d87101d99a79339dc93b8b1183d8b20545/aiohttp-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1942244f00baaacaa8155eca94dbd9e8cc7017deb69b75ef67c78e89fdad3c77", size = 1326126 }, + { url = "https://files.pythonhosted.org/packages/af/20/da0d65e07ce49d79173fed41598f487a0a722e87cfbaa8bb7e078a7c1d39/aiohttp-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04a1f2a65ad2f93aa20f9ff9f1b672bf912413e5547f60749fa2ef8a644e061", size = 1364538 }, + { url = "https://files.pythonhosted.org/packages/aa/20/b59728405114e57541ba9d5b96033e69d004e811ded299537f74237629ca/aiohttp-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f2bfc0032a00405d4af2ba27f3c429e851d04fad1e5ceee4080a1c570476697", size = 1399896 }, + { url = "https://files.pythonhosted.org/packages/2a/92/006690c31b830acbae09d2618e41308fe4c81c0679b3b33a3af859e0b7bf/aiohttp-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:424ae21498790e12eb759040bbb504e5e280cab64693d14775c54269fd1d2bb7", size = 1312914 }, + { url = "https://files.pythonhosted.org/packages/d4/71/1a253ca215b6c867adbd503f1e142117527ea8775e65962bc09b2fad1d2c/aiohttp-3.10.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:975218eee0e6d24eb336d0328c768ebc5d617609affaca5dbbd6dd1984f16ed0", size = 1271301 }, + { url = "https://files.pythonhosted.org/packages/0a/ab/5d1d9ff9ce6cce8fa54774d0364e64a0f3cd50e512ff09082ced8e5217a1/aiohttp-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4120d7fefa1e2d8fb6f650b11489710091788de554e2b6f8347c7a20ceb003f5", size = 1291652 }, + { url = "https://files.pythonhosted.org/packages/75/5f/f90510ea954b9ae6e7a53d2995b97a3e5c181110fdcf469bc9238445871d/aiohttp-3.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b90078989ef3fc45cf9221d3859acd1108af7560c52397ff4ace8ad7052a132e", size = 1286289 }, + { url = "https://files.pythonhosted.org/packages/be/9e/1f523414237798660921817c82b9225a363af436458caf584d2fa6a2eb4a/aiohttp-3.10.5-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ba5a8b74c2a8af7d862399cdedce1533642fa727def0b8c3e3e02fcb52dca1b1", size = 1341848 }, + { url = "https://files.pythonhosted.org/packages/f6/36/443472ddaa85d7d80321fda541d9535b23ecefe0bf5792cc3955ea635190/aiohttp-3.10.5-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:02594361128f780eecc2a29939d9dfc870e17b45178a867bf61a11b2a4367277", size = 1361619 }, + { url = "https://files.pythonhosted.org/packages/19/f6/3ecbac0bc4359c7d7ba9e85c6b10f57e20edaf1f97751ad2f892db231ad0/aiohttp-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8fb4fc029e135859f533025bc82047334e24b0d489e75513144f25408ecaf058", size = 1320869 }, + { url = "https://files.pythonhosted.org/packages/34/7e/ed74ffb36e3a0cdec1b05d8fbaa29cb532371d5a20058b3a8052fc90fe7c/aiohttp-3.10.5-cp311-cp311-win32.whl", hash = "sha256:e1ca1ef5ba129718a8fc827b0867f6aa4e893c56eb00003b7367f8a733a9b072", size = 359271 }, + { url = "https://files.pythonhosted.org/packages/98/1b/718901f04bc8c886a742be9e83babb7b93facabf7c475cc95e2b3ab80b4d/aiohttp-3.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:349ef8a73a7c5665cca65c88ab24abe75447e28aa3bc4c93ea5093474dfdf0ff", size = 379143 }, + { url = "https://files.pythonhosted.org/packages/d9/1c/74f9dad4a2fc4107e73456896283d915937f48177b99867b63381fadac6e/aiohttp-3.10.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:305be5ff2081fa1d283a76113b8df7a14c10d75602a38d9f012935df20731487", size = 583468 }, + { url = "https://files.pythonhosted.org/packages/12/29/68d090551f2b58ce76c2b436ced8dd2dfd32115d41299bf0b0c308a5483c/aiohttp-3.10.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3a1c32a19ee6bbde02f1cb189e13a71b321256cc1d431196a9f824050b160d5a", size = 394066 }, + { url = "https://files.pythonhosted.org/packages/8f/f7/971f88b4cdcaaa4622925ba7d86de47b48ec02a9040a143514b382f78da4/aiohttp-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:61645818edd40cc6f455b851277a21bf420ce347baa0b86eaa41d51ef58ba23d", size = 389098 }, + { url = "https://files.pythonhosted.org/packages/f1/5a/fe3742efdce551667b2ddf1158b27c5b8eb1edc13d5e14e996e52e301025/aiohttp-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c225286f2b13bab5987425558baa5cbdb2bc925b2998038fa028245ef421e75", size = 1332742 }, + { url = "https://files.pythonhosted.org/packages/1a/52/a25c0334a1845eb4967dff279151b67ca32a948145a5812ed660ed900868/aiohttp-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ba01ebc6175e1e6b7275c907a3a36be48a2d487549b656aa90c8a910d9f3178", size = 1372134 }, + { url = "https://files.pythonhosted.org/packages/96/3d/33c1d8efc2d8ec36bff9a8eca2df9fdf8a45269c6e24a88e74f2aa4f16bd/aiohttp-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8eaf44ccbc4e35762683078b72bf293f476561d8b68ec8a64f98cf32811c323e", size = 1414413 }, + { url = "https://files.pythonhosted.org/packages/64/74/0f1ddaa5f0caba1d946f0dd0c31f5744116e4a029beec454ec3726d3311f/aiohttp-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c43eb1ab7cbf411b8e387dc169acb31f0ca0d8c09ba63f9eac67829585b44f", size = 1328107 }, + { url = "https://files.pythonhosted.org/packages/0a/32/c10118f0ad50e4093227234f71fd0abec6982c29367f65f32ee74ed652c4/aiohttp-3.10.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de7a5299827253023c55ea549444e058c0eb496931fa05d693b95140a947cb73", size = 1280126 }, + { url = "https://files.pythonhosted.org/packages/c6/c9/77e3d648d97c03a42acfe843d03e97be3c5ef1b4d9de52e5bd2d28eed8e7/aiohttp-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4790f0e15f00058f7599dab2b206d3049d7ac464dc2e5eae0e93fa18aee9e7bf", size = 1292660 }, + { url = 
"https://files.pythonhosted.org/packages/7e/5d/99c71f8e5c8b64295be421b4c42d472766b263a1fe32e91b64bf77005bf2/aiohttp-3.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44b324a6b8376a23e6ba25d368726ee3bc281e6ab306db80b5819999c737d820", size = 1300988 }, + { url = "https://files.pythonhosted.org/packages/8f/2c/76d2377dd947f52fbe8afb19b18a3b816d66c7966755c04030f93b1f7b2d/aiohttp-3.10.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0d277cfb304118079e7044aad0b76685d30ecb86f83a0711fc5fb257ffe832ca", size = 1339268 }, + { url = "https://files.pythonhosted.org/packages/fd/e6/3d9d935cc705d57ed524d82ec5d6b678a53ac1552720ae41282caa273584/aiohttp-3.10.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:54d9ddea424cd19d3ff6128601a4a4d23d54a421f9b4c0fff740505813739a91", size = 1366993 }, + { url = "https://files.pythonhosted.org/packages/fe/c2/f7eed4d602f3f224600d03ab2e1a7734999b0901b1c49b94dc5891340433/aiohttp-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4f1c9866ccf48a6df2b06823e6ae80573529f2af3a0992ec4fe75b1a510df8a6", size = 1329459 }, + { url = "https://files.pythonhosted.org/packages/ce/8f/27f205b76531fc592abe29e1ad265a16bf934a9f609509c02d765e6a8055/aiohttp-3.10.5-cp312-cp312-win32.whl", hash = "sha256:dc4826823121783dccc0871e3f405417ac116055bf184ac04c36f98b75aacd12", size = 356968 }, + { url = "https://files.pythonhosted.org/packages/39/8c/4f6c0b2b3629f6be6c81ab84d9d577590f74f01d4412bfc4067958eaa1e1/aiohttp-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:22c0a23a3b3138a6bf76fc553789cb1a703836da86b0f306b6f0dc1617398abc", size = 377650 }, + { url = "https://files.pythonhosted.org/packages/7b/b9/03b4327897a5b5d29338fa9b514f1c2f66a3e4fc88a4e40fad478739314d/aiohttp-3.10.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7f6b639c36734eaa80a6c152a238242bedcee9b953f23bb887e9102976343092", size = 576994 }, + { url = "https://files.pythonhosted.org/packages/67/1b/20c2e159cd07b8ed6dde71c2258233902fdf415b2fe6174bd2364ba63107/aiohttp-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f29930bc2921cef955ba39a3ff87d2c4398a0394ae217f41cb02d5c26c8b1b77", size = 390684 }, + { url = "https://files.pythonhosted.org/packages/4d/6b/ff83b34f157e370431d8081c5d1741963f4fb12f9aaddb2cacbf50305225/aiohttp-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f489a2c9e6455d87eabf907ac0b7d230a9786be43fbe884ad184ddf9e9c1e385", size = 386176 }, + { url = "https://files.pythonhosted.org/packages/4d/a1/6e92817eb657de287560962df4959b7ddd22859c4b23a0309e2d3de12538/aiohttp-3.10.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:123dd5b16b75b2962d0fff566effb7a065e33cd4538c1692fb31c3bda2bfb972", size = 1303310 }, + { url = "https://files.pythonhosted.org/packages/04/29/200518dc7a39c30ae6d5bc232d7207446536e93d3d9299b8e95db6e79c54/aiohttp-3.10.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b98e698dc34966e5976e10bbca6d26d6724e6bdea853c7c10162a3235aba6e16", size = 1340445 }, + { url = "https://files.pythonhosted.org/packages/8e/20/53f7bba841ba7b5bb5dea580fea01c65524879ba39cb917d08c845524717/aiohttp-3.10.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3b9162bab7e42f21243effc822652dc5bb5e8ff42a4eb62fe7782bcbcdfacf6", size = 1385121 }, + { url = "https://files.pythonhosted.org/packages/f1/b4/d99354ad614c48dd38fb1ee880a1a54bd9ab2c3bcad3013048d4a1797d3a/aiohttp-3.10.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1923a5c44061bffd5eebeef58cecf68096e35003907d8201a4d0d6f6e387ccaa", size = 1299669 }, + { url = "https://files.pythonhosted.org/packages/51/39/ca1de675f2a5729c71c327e52ac6344e63f036bd37281686ae5c3fb13bfb/aiohttp-3.10.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d55f011da0a843c3d3df2c2cf4e537b8070a419f891c930245f05d329c4b0689", size = 1252638 }, + { url = "https://files.pythonhosted.org/packages/54/cf/a3ae7ff43138422d477348e309ef8275779701bf305ff6054831ef98b782/aiohttp-3.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:afe16a84498441d05e9189a15900640a2d2b5e76cf4efe8cbb088ab4f112ee57", size = 1266889 }, + { url = "https://files.pythonhosted.org/packages/6e/7a/c6027ad70d9fb23cf254a26144de2723821dade1a624446aa22cd0b6d012/aiohttp-3.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8112fb501b1e0567a1251a2fd0747baae60a4ab325a871e975b7bb67e59221f", size = 1266249 }, + { url = "https://files.pythonhosted.org/packages/64/fd/ed136d46bc2c7e3342fed24662b4827771d55ceb5a7687847aae977bfc17/aiohttp-3.10.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1e72589da4c90337837fdfe2026ae1952c0f4a6e793adbbfbdd40efed7c63599", size = 1311036 }, + { url = "https://files.pythonhosted.org/packages/76/9a/43eeb0166f1119256d6f43468f900db1aed7fbe32069d2a71c82f987db4d/aiohttp-3.10.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4d46c7b4173415d8e583045fbc4daa48b40e31b19ce595b8d92cf639396c15d5", size = 1338756 }, + { url = "https://files.pythonhosted.org/packages/d5/bc/d01ff0810b3f5e26896f76d44225ed78b088ddd33079b85cd1a23514318b/aiohttp-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33e6bc4bab477c772a541f76cd91e11ccb6d2efa2b8d7d7883591dfb523e5987", size = 1299976 }, + { url = "https://files.pythonhosted.org/packages/3e/c9/50a297c4f7ab57a949f4add2d3eafe5f3e68bb42f739e933f8b32a092bda/aiohttp-3.10.5-cp313-cp313-win32.whl", hash = "sha256:c58c6837a2c2a7cf3133983e64173aec11f9c2cd8e87ec2fdc16ce727bcf1a04", size = 355609 }, + { url = "https://files.pythonhosted.org/packages/65/28/aee9d04fb0b3b1f90622c338a08e54af5198e704a910e20947c473298fd0/aiohttp-3.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:38172a70005252b6893088c0f5e8a47d173df7cc2b2bd88650957eb84fcf5022", size = 375697 }, +] + +[[package]] +name = "aiosignal" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/67/0952ed97a9793b4958e5736f6d2b346b414a2cd63e82d05940032f45b32f/aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc", size = 19422 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/ac/a7305707cb852b7e16ff80eaf5692309bde30e2b1100a1fcacdc8f731d97/aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17", size = 7617 }, ] [[package]] @@ -36,6 +130,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/75/f9/f1c10e223c7b56a38109a3f2eb4e7fe9a757ea3ed3a166754fb30f65e466/ansicon-1.89.0-py2.py3-none-any.whl", hash = "sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec", size = 63675 }, ] +[[package]] +name = "anyio" +version = "4.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/e6/e3/c4c8d473d6780ef1853d630d581f70d655b4f8d7553c6997958c283039a2/anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94", size = 163930 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/a2/10639a79341f6c019dedc95bd48a4928eed9f1d1197f4c04f546fc7ae0ff/anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7", size = 86780 }, +] + [[package]] name = "astroid" version = "3.2.4" @@ -238,6 +345,64 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, ] +[[package]] +name = "contourpy" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/f6/31a8f28b4a2a4fa0e01085e542f3081ab0588eff8e589d39d775172c9792/contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4", size = 13464370 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/1f/9375917786cb39270b0ee6634536c0e22abf225825602688990d8f5c6c19/contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad", size = 266356 }, + { url = "https://files.pythonhosted.org/packages/05/46/9256dd162ea52790c127cb58cfc3b9e3413a6e3478917d1f811d420772ec/contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49", size = 250915 }, + { url = "https://files.pythonhosted.org/packages/e1/5d/3056c167fa4486900dfbd7e26a2fdc2338dc58eee36d490a0ed3ddda5ded/contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66", size = 310443 }, + { url = "https://files.pythonhosted.org/packages/ca/c2/1a612e475492e07f11c8e267ea5ec1ce0d89971be496c195e27afa97e14a/contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081", size = 348548 }, + { url = "https://files.pythonhosted.org/packages/45/cf/2c2fc6bb5874158277b4faf136847f0689e1b1a1f640a36d76d52e78907c/contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1", size = 319118 }, + { url = "https://files.pythonhosted.org/packages/03/33/003065374f38894cdf1040cef474ad0546368eea7e3a51d48b8a423961f8/contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d", size = 323162 }, + { url = "https://files.pythonhosted.org/packages/42/80/e637326e85e4105a802e42959f56cff2cd39a6b5ef68d5d9aee3ea5f0e4c/contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c", size = 1265396 }, + { url = "https://files.pythonhosted.org/packages/7c/3b/8cbd6416ca1bbc0202b50f9c13b2e0b922b64be888f9d9ee88e6cfabfb51/contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb", size = 1324297 }, + { url = 
"https://files.pythonhosted.org/packages/4d/2c/021a7afaa52fe891f25535506cc861c30c3c4e5a1c1ce94215e04b293e72/contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c", size = 171808 }, + { url = "https://files.pythonhosted.org/packages/8d/2f/804f02ff30a7fae21f98198828d0857439ec4c91a96e20cf2d6c49372966/contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67", size = 217181 }, + { url = "https://files.pythonhosted.org/packages/c9/92/8e0bbfe6b70c0e2d3d81272b58c98ac69ff1a4329f18c73bd64824d8b12e/contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f", size = 267838 }, + { url = "https://files.pythonhosted.org/packages/e3/04/33351c5d5108460a8ce6d512307690b023f0cfcad5899499f5c83b9d63b1/contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6", size = 251549 }, + { url = "https://files.pythonhosted.org/packages/51/3d/aa0fe6ae67e3ef9f178389e4caaaa68daf2f9024092aa3c6032e3d174670/contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639", size = 303177 }, + { url = "https://files.pythonhosted.org/packages/56/c3/c85a7e3e0cab635575d3b657f9535443a6f5d20fac1a1911eaa4bbe1aceb/contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c", size = 341735 }, + { url = "https://files.pythonhosted.org/packages/dd/8d/20f7a211a7be966a53f474bc90b1a8202e9844b3f1ef85f3ae45a77151ee/contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06", size = 314679 }, + { url = "https://files.pythonhosted.org/packages/6e/be/524e377567defac0e21a46e2a529652d165fed130a0d8a863219303cee18/contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09", size = 320549 }, + { url = "https://files.pythonhosted.org/packages/0f/96/fdb2552a172942d888915f3a6663812e9bc3d359d53dafd4289a0fb462f0/contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd", size = 1263068 }, + { url = "https://files.pythonhosted.org/packages/2a/25/632eab595e3140adfa92f1322bf8915f68c932bac468e89eae9974cf1c00/contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35", size = 1322833 }, + { url = "https://files.pythonhosted.org/packages/73/e3/69738782e315a1d26d29d71a550dbbe3eb6c653b028b150f70c1a5f4f229/contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb", size = 172681 }, + { url = "https://files.pythonhosted.org/packages/0c/89/9830ba00d88e43d15e53d64931e66b8792b46eb25e2050a88fec4a0df3d5/contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b", size = 218283 }, + { url = "https://files.pythonhosted.org/packages/53/a1/d20415febfb2267af2d7f06338e82171824d08614084714fb2c1dac9901f/contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3", size = 
267879 }, + { url = "https://files.pythonhosted.org/packages/aa/45/5a28a3570ff6218d8bdfc291a272a20d2648104815f01f0177d103d985e1/contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7", size = 251573 }, + { url = "https://files.pythonhosted.org/packages/39/1c/d3f51540108e3affa84f095c8b04f0aa833bb797bc8baa218a952a98117d/contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84", size = 303184 }, + { url = "https://files.pythonhosted.org/packages/00/56/1348a44fb6c3a558c1a3a0cd23d329d604c99d81bf5a4b58c6b71aab328f/contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0", size = 340262 }, + { url = "https://files.pythonhosted.org/packages/2b/23/00d665ba67e1bb666152131da07e0f24c95c3632d7722caa97fb61470eca/contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b", size = 313806 }, + { url = "https://files.pythonhosted.org/packages/5a/42/3cf40f7040bb8362aea19af9a5fb7b32ce420f645dd1590edcee2c657cd5/contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da", size = 319710 }, + { url = "https://files.pythonhosted.org/packages/05/32/f3bfa3fc083b25e1a7ae09197f897476ee68e7386e10404bdf9aac7391f0/contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14", size = 1264107 }, + { url = "https://files.pythonhosted.org/packages/1c/1e/1019d34473a736664f2439542b890b2dc4c6245f5c0d8cdfc0ccc2cab80c/contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8", size = 1322458 }, + { url = "https://files.pythonhosted.org/packages/22/85/4f8bfd83972cf8909a4d36d16b177f7b8bdd942178ea4bf877d4a380a91c/contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294", size = 172643 }, + { url = "https://files.pythonhosted.org/packages/cc/4a/fb3c83c1baba64ba90443626c228ca14f19a87c51975d3b1de308dd2cf08/contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087", size = 218301 }, + { url = "https://files.pythonhosted.org/packages/76/65/702f4064f397821fea0cb493f7d3bc95a5d703e20954dce7d6d39bacf378/contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8", size = 278972 }, + { url = "https://files.pythonhosted.org/packages/80/85/21f5bba56dba75c10a45ec00ad3b8190dbac7fd9a8a8c46c6116c933e9cf/contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b", size = 263375 }, + { url = "https://files.pythonhosted.org/packages/0a/64/084c86ab71d43149f91ab3a4054ccf18565f0a8af36abfa92b1467813ed6/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973", size = 307188 }, + { url = 
"https://files.pythonhosted.org/packages/3d/ff/d61a4c288dc42da0084b8d9dc2aa219a850767165d7d9a9c364ff530b509/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18", size = 345644 }, + { url = "https://files.pythonhosted.org/packages/ca/aa/00d2313d35ec03f188e8f0786c2fc61f589306e02fdc158233697546fd58/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8", size = 317141 }, + { url = "https://files.pythonhosted.org/packages/8d/6a/b5242c8cb32d87f6abf4f5e3044ca397cb1a76712e3fa2424772e3ff495f/contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6", size = 323469 }, + { url = "https://files.pythonhosted.org/packages/6f/a6/73e929d43028a9079aca4bde107494864d54f0d72d9db508a51ff0878593/contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2", size = 1260894 }, + { url = "https://files.pythonhosted.org/packages/2b/1e/1e726ba66eddf21c940821df8cf1a7d15cb165f0682d62161eaa5e93dae1/contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927", size = 1314829 }, +] + +[[package]] +name = "cycler" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 }, +] + [[package]] name = "decorator" version = "5.1.1" @@ -247,6 +412,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, ] +[[package]] +name = "dicttoxml" +version = "1.7.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/c9/3132427f9e64d572688e6a1cbe3d542d1a03f676b81fb600f3d1fd7d2ec5/dicttoxml-1.7.16.tar.gz", hash = "sha256:6f36ce644881db5cd8940bee9b7cb3f3f6b7b327ba8a67d83d3e2caa0538bf9d", size = 39314 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/09/40/9d521973cae7f7ef8b1f0d0e28a3db0f851c1f1dca45d4c2ed5360bb7246/dicttoxml-1.7.16-py3-none-any.whl", hash = "sha256:8677671496d0d38e66c7179f82a7e9059f94887777955dc71b0ac602ee637c26", size = 24155 }, +] + [[package]] name = "dill" version = "0.3.8" @@ -265,6 +439,59 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8e/41/9307e4f5f9976bc8b7fea0b66367734e8faf3ec84bc0d412d8cfabbb66cd/distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784", size = 468850 }, ] +[[package]] +name = "distro" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277 }, +] + +[[package]] +name = "dm-tree" +version = "0.1.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/6d/f1997aac42e0f550c1e952a0b920eaa0bfc4d27d0421499881b934b969fc/dm-tree-0.1.8.tar.gz", hash = "sha256:0fcaabbb14e7980377439e7140bd05552739ca5e515ecb3119f234acee4b9430", size = 35384 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/64/901b324804793743f0fdc9e47db893bf0ded9e074850fab2440af330fe83/dm_tree-0.1.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad16ceba90a56ec47cf45b21856d14962ac314787975ef786efb5e6e9ca75ec7", size = 167628 }, + { url = "https://files.pythonhosted.org/packages/b1/65/4f10a68dde5fa0c91043c9c899e9bc79b1657ba932d39a5f8525c0058e68/dm_tree-0.1.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:803bfc53b4659f447ac694dbd04235f94a73ef7c1fd1e0df7c84ac41e0bc963b", size = 115351 }, + { url = "https://files.pythonhosted.org/packages/08/e2/4c29cb9876456517f21979ddcbb6048f28a3b52c61aa9d14d42adafcdca4/dm_tree-0.1.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:378cc8ad93c5fe3590f405a309980721f021c790ca1bdf9b15bb1d59daec57f5", size = 110661 }, + { url = "https://files.pythonhosted.org/packages/fe/89/386332bbd7567c4ccc13aa2e58f733237503fc75fb389955d3b06b9fb967/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1607ce49aa42f010d1e5e616d92ce899d66835d4d8bea49679582435285515de", size = 146727 }, + { url = "https://files.pythonhosted.org/packages/a3/e7/b0c04ea5af82c19fd5984bfe980f4012601c4708634c7c51a952b17c93b2/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343a4a4ebaa127451ff971254a4be4084eb4bdc0b2513c32b46f6f728fd03f9e", size = 174689 }, + { url = "https://files.pythonhosted.org/packages/13/0d/09a4ecb54c03db53d9eb5bbc81609d89de26e3762743f003282c1b48debb/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa42a605d099ee7d41ba2b5fb75e21423951fd26e5d50583a00471238fb3021d", size = 150338 }, + { url = "https://files.pythonhosted.org/packages/4a/27/c5e3580a952a07e5a1428ae952874796870dc8db789f3d774e886160a9f4/dm_tree-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b7764de0d855338abefc6e3ee9fe40d301668310aa3baea3f778ff051f4393", size = 152800 }, + { url = "https://files.pythonhosted.org/packages/e4/c1/522041457444b67125ac9527208bb3148f63d7dce0a86ffa589ec763a10e/dm_tree-0.1.8-cp311-cp311-win_amd64.whl", hash = "sha256:a5d819c38c03f0bb5b3b3703c60e4b170355a0fc6b5819325bf3d4ceb3ae7e80", size = 101336 }, + { url = "https://files.pythonhosted.org/packages/72/2c/e33dfc96f974ae3cba82c9836371c93fcb4d59d5a82ebb853861618a0b0b/dm_tree-0.1.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea9e59e0451e7d29aece402d9f908f2e2a80922bcde2ebfd5dcb07750fcbfee8", size = 169495 }, + { url = "https://files.pythonhosted.org/packages/17/af/4030827253a5d50eb8da6f7189bc33d3c850c4109cf3414910e9af677cb7/dm_tree-0.1.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:94d3f0826311f45ee19b75f5b48c99466e4218a0489e81c0f0167bda50cacf22", size = 116525 }, + { url = "https://files.pythonhosted.org/packages/10/10/5f9eed00b1186921e447960443f03cda6374cba8cd5cf7aff2b42ecb8a0e/dm_tree-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:435227cf3c5dc63f4de054cf3d00183790bd9ead4c3623138c74dde7f67f521b", size = 111436 }, + { url = "https://files.pythonhosted.org/packages/4a/da/3d3d04f7a572f7649f48edc9402ff5836e2f90e18445ffde110fd6142889/dm_tree-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09964470f76a5201aff2e8f9b26842976de7889300676f927930f6285e256760", size = 146828 }, + { url = "https://files.pythonhosted.org/packages/c4/12/0a8c2152655ca39c1059c762ea1dc12784166c735126eb0ab929c518ef4e/dm_tree-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75c5d528bb992981c20793b6b453e91560784215dffb8a5440ba999753c14ceb", size = 175054 }, + { url = "https://files.pythonhosted.org/packages/c9/d4/8cbb857612ca69763ee4f4f97c7b91659df1d373d62237cb9c772e55ae97/dm_tree-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a94aba18a35457a1b5cd716fd7b46c5dafdc4cf7869b4bae665b91c4682a8e", size = 152834 }, + { url = "https://files.pythonhosted.org/packages/ad/e3/96f5267fe5a47c882dce7f3d06b26ddd756681fc4fbedd55d51b78b08bca/dm_tree-0.1.8-cp312-cp312-win_amd64.whl", hash = "sha256:96a548a406a6fb15fe58f6a30a57ff2f2aafbf25f05afab00c8f5e5977b6c715", size = 101754 }, +] + +[[package]] +name = "docker-pycreds" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c5/e6/d1f6c00b7221e2d7c4b470132c931325c8b22c51ca62417e300f5ce16009/docker-pycreds-0.4.0.tar.gz", hash = "sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4", size = 8754 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl", hash = "sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49", size = 8982 }, +] + +[[package]] +name = "docstring-parser" +version = "0.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/08/12/9c22a58c0b1e29271051222d8906257616da84135af9ed167c9e28f85cb3/docstring_parser-0.16.tar.gz", hash = "sha256:538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e", size = 26565 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/7c/e9fcff7623954d86bdc17782036cbf715ecab1bec4847c008557affe1ca8/docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637", size = 36533 }, +] + [[package]] name = "editor" version = "1.6.6" @@ -296,6 +523,40 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b5/fd/afcd0496feca3276f509df3dbd5dae726fcc756f1a08d9e25abe1733f962/executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", size = 25805 }, ] +[[package]] +name = "fastapi" +version = "0.112.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9c/11/4874d165e7ef97aa803a567a4be8f9c8b0bd7cced6d536d44033ef7d4bfa/fastapi-0.112.2.tar.gz", hash = 
"sha256:3d4729c038414d5193840706907a41839d839523da6ed0c2811f1168cac1798c", size = 291967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/ae/6570ae1b67007735229f10f2e1174d6e33c056ee9c0c1754d432acbede94/fastapi-0.112.2-py3-none-any.whl", hash = "sha256:db84b470bd0e2b1075942231e90e3577e12a903c4dc8696f0d206a7904a7af1c", size = 93471 }, +] + +[[package]] +name = "fhaviary" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docstring-parser" }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "sqlalchemy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dd/39/aacce7ceb9a2695b532e8cdbf57437c17efe6cc3e0774b8f28e5d644d434/fhaviary-0.2.0.tar.gz", hash = "sha256:9ce627de0b6f12fda00cc2997f1fa5b3b6da824bb7ec9845aa8dc7828f8e1ec6", size = 120895 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/9c/4d67f1ec43307f2ed4d076ebde37e6c0f060531597dfe25dfd3f69582fb3/fhaviary-0.2.0-py3-none-any.whl", hash = "sha256:f7f4b699b17f93c122c7d859fd207c7b148fe04af396f81a4c3e23358424dc2e", size = 30230 }, +] + +[package.optional-dependencies] +xml = [ + { name = "dicttoxml" }, +] + [[package]] name = "filelock" version = "3.15.4" @@ -305,6 +566,79 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ae/f0/48285f0262fe47103a4a45972ed2f9b93e4c80b8fd609fa98da78b2a5706/filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7", size = 16159 }, ] +[[package]] +name = "fonttools" +version = "4.53.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c6/cb/cd80a0da995adde8ade6044a8744aee0da5efea01301cadf770f7fbe7dcc/fonttools-4.53.1.tar.gz", hash = "sha256:e128778a8e9bc11159ce5447f76766cefbd876f44bd79aff030287254e4752c4", size = 3452797 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/6a/206391c869ab22d1374e2575cad7cab36b93b9e3d37f48f4696eed2c6e9e/fonttools-4.53.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:da33440b1413bad53a8674393c5d29ce64d8c1a15ef8a77c642ffd900d07bfe1", size = 2762654 }, + { url = "https://files.pythonhosted.org/packages/f5/7e/4060d88dbfaf446e1c9f0fe9cf13dba36ba47c4da85ce5c1df084ce47e7d/fonttools-4.53.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ff7e5e9bad94e3a70c5cd2fa27f20b9bb9385e10cddab567b85ce5d306ea923", size = 2247865 }, + { url = "https://files.pythonhosted.org/packages/e1/67/fff766817e17d67208f8a1e72de15066149485acb5e4ff0816b11fd5fca3/fonttools-4.53.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6e7170d675d12eac12ad1a981d90f118c06cf680b42a2d74c6c931e54b50719", size = 4873046 }, + { url = "https://files.pythonhosted.org/packages/a4/22/0a0ad59d9367997fd74a00ad2e88d10559122e09f105e94d34c155aecc0a/fonttools-4.53.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee32ea8765e859670c4447b0817514ca79054463b6b79784b08a8df3a4d78e3", size = 4920859 }, + { url = "https://files.pythonhosted.org/packages/0b/c4/b4e2f1699a5e2244373a6e8175f862f49f377b444adc6c7b1fe1f5b3d04d/fonttools-4.53.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6e08f572625a1ee682115223eabebc4c6a2035a6917eac6f60350aba297ccadb", size = 4885904 }, + { url = "https://files.pythonhosted.org/packages/64/e7/b9a07c386adf8ad0348163fbcaab74daed6ef18ddb3f49b61b5c19900aeb/fonttools-4.53.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b21952c092ffd827504de7e66b62aba26fdb5f9d1e435c52477e6486e9d128b2", size = 
5054708 }, + { url = "https://files.pythonhosted.org/packages/e9/53/2a79462ae38d7943e63290209c04fef89677c67b29cb329cdc549c18d4d5/fonttools-4.53.1-cp311-cp311-win32.whl", hash = "sha256:9dfdae43b7996af46ff9da520998a32b105c7f098aeea06b2226b30e74fbba88", size = 2158885 }, + { url = "https://files.pythonhosted.org/packages/c8/e1/059700c154bd7170d1c37061239836d2e51ff608f47075450f06dd3c292a/fonttools-4.53.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4d0096cb1ac7a77b3b41cd78c9b6bc4a400550e21dc7a92f2b5ab53ed74eb02", size = 2205133 }, + { url = "https://files.pythonhosted.org/packages/87/63/8271f50f3e7bff8b78e03914c4c2893f2f21bd4db2975c60d11ecfbdd174/fonttools-4.53.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d92d3c2a1b39631a6131c2fa25b5406855f97969b068e7e08413325bc0afba58", size = 2756146 }, + { url = "https://files.pythonhosted.org/packages/dd/bd/cb8fd2dddd68089c112bf42a88afe188b8ace73f94406539857dcc9347a6/fonttools-4.53.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3b3c8ebafbee8d9002bd8f1195d09ed2bd9ff134ddec37ee8f6a6375e6a4f0e8", size = 2244990 }, + { url = "https://files.pythonhosted.org/packages/ae/71/2b9761e25697bdaf3dfe8269541bd4324f3eb0e4cc13f71d7f90cd272394/fonttools-4.53.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32f029c095ad66c425b0ee85553d0dc326d45d7059dbc227330fc29b43e8ba60", size = 4787604 }, + { url = "https://files.pythonhosted.org/packages/db/2b/5779cfd48625e013c2dfcf0c246474d5b1f5d061a5f1e476037bf9fff3a3/fonttools-4.53.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f5e6c3510b79ea27bb1ebfcc67048cde9ec67afa87c7dd7efa5c700491ac7f", size = 4871141 }, + { url = "https://files.pythonhosted.org/packages/b8/3d/ac3cec35a503bf789d03e9d155a220c9e574f4f1573f00a3bea55695d535/fonttools-4.53.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f677ce218976496a587ab17140da141557beb91d2a5c1a14212c994093f2eae2", size = 4764714 }, + { url = "https://files.pythonhosted.org/packages/ac/9f/27135ac0328e22cca1ba23ee6a1a1f971c13e9f0387adc5598d4635c501d/fonttools-4.53.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9e6ceba2a01b448e36754983d376064730690401da1dd104ddb543519470a15f", size = 5023568 }, + { url = "https://files.pythonhosted.org/packages/04/40/44d6a94e52e91fe104f9ca95944466af34828992cbc66b666f541de137f1/fonttools-4.53.1-cp312-cp312-win32.whl", hash = "sha256:791b31ebbc05197d7aa096bbc7bd76d591f05905d2fd908bf103af4488e60670", size = 2147572 }, + { url = "https://files.pythonhosted.org/packages/6d/9a/b695930e1b4e6929cc60e294489421632a05c105ac8c56ee63ef56a47872/fonttools-4.53.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ed170b5e17da0264b9f6fae86073be3db15fa1bd74061c8331022bca6d09bab", size = 2193313 }, + { url = "https://files.pythonhosted.org/packages/e4/b9/0394d67056d4ad36a3807b439571934b318f1df925593a95e9ec0516b1a7/fonttools-4.53.1-py3-none-any.whl", hash = "sha256:f1f8758a2ad110bd6432203a344269f445a2907dc24ef6bccfd0ac4e14e0d71d", size = 1090472 }, +] + +[[package]] +name = "frozenlist" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/3d/2102257e7acad73efc4a0c306ad3953f68c504c16982bbdfee3ad75d8085/frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b", size = 37820 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/01/bc/8d33f2d84b9368da83e69e42720cff01c5e199b5a868ba4486189a4d8fa9/frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0", size = 97060 }, + { url = "https://files.pythonhosted.org/packages/af/b2/904500d6a162b98a70e510e743e7ea992241b4f9add2c8063bf666ca21df/frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49", size = 55347 }, + { url = "https://files.pythonhosted.org/packages/5b/9c/f12b69997d3891ddc0d7895999a00b0c6a67f66f79498c0e30f27876435d/frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced", size = 53374 }, + { url = "https://files.pythonhosted.org/packages/ac/6e/e0322317b7c600ba21dec224498c0c5959b2bce3865277a7c0badae340a9/frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0", size = 273288 }, + { url = "https://files.pythonhosted.org/packages/a7/76/180ee1b021568dad5b35b7678616c24519af130ed3fa1e0f1ed4014e0f93/frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106", size = 284737 }, + { url = "https://files.pythonhosted.org/packages/05/08/40159d706a6ed983c8aca51922a93fc69f3c27909e82c537dd4054032674/frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068", size = 280267 }, + { url = "https://files.pythonhosted.org/packages/e0/18/9f09f84934c2b2aa37d539a322267939770362d5495f37783440ca9c1b74/frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2", size = 258778 }, + { url = "https://files.pythonhosted.org/packages/b3/c9/0bc5ee7e1f5cc7358ab67da0b7dfe60fbd05c254cea5c6108e7d1ae28c63/frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19", size = 272276 }, + { url = "https://files.pythonhosted.org/packages/12/5d/147556b73a53ad4df6da8bbb50715a66ac75c491fdedac3eca8b0b915345/frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82", size = 272424 }, + { url = "https://files.pythonhosted.org/packages/83/61/2087bbf24070b66090c0af922685f1d0596c24bb3f3b5223625bdeaf03ca/frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec", size = 260881 }, + { url = "https://files.pythonhosted.org/packages/a8/be/a235bc937dd803258a370fe21b5aa2dd3e7bfe0287a186a4bec30c6cccd6/frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a", size = 282327 }, + { url = "https://files.pythonhosted.org/packages/5d/e7/b2469e71f082948066b9382c7b908c22552cc705b960363c390d2e23f587/frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74", size = 281502 }, + { url = 
"https://files.pythonhosted.org/packages/db/1b/6a5b970e55dffc1a7d0bb54f57b184b2a2a2ad0b7bca16a97ca26d73c5b5/frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2", size = 272292 }, + { url = "https://files.pythonhosted.org/packages/1a/05/ebad68130e6b6eb9b287dacad08ea357c33849c74550c015b355b75cc714/frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17", size = 44446 }, + { url = "https://files.pythonhosted.org/packages/b3/21/c5aaffac47fd305d69df46cfbf118768cdf049a92ee6b0b5cb029d449dcf/frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825", size = 50459 }, + { url = "https://files.pythonhosted.org/packages/b4/db/4cf37556a735bcdb2582f2c3fa286aefde2322f92d3141e087b8aeb27177/frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae", size = 93937 }, + { url = "https://files.pythonhosted.org/packages/46/03/69eb64642ca8c05f30aa5931d6c55e50b43d0cd13256fdd01510a1f85221/frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb", size = 53656 }, + { url = "https://files.pythonhosted.org/packages/3f/ab/c543c13824a615955f57e082c8a5ee122d2d5368e80084f2834e6f4feced/frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b", size = 51868 }, + { url = "https://files.pythonhosted.org/packages/a9/b8/438cfd92be2a124da8259b13409224d9b19ef8f5a5b2507174fc7e7ea18f/frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86", size = 280652 }, + { url = "https://files.pythonhosted.org/packages/54/72/716a955521b97a25d48315c6c3653f981041ce7a17ff79f701298195bca3/frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480", size = 286739 }, + { url = "https://files.pythonhosted.org/packages/65/d8/934c08103637567084568e4d5b4219c1016c60b4d29353b1a5b3587827d6/frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09", size = 289447 }, + { url = "https://files.pythonhosted.org/packages/70/bb/d3b98d83ec6ef88f9bd63d77104a305d68a146fd63a683569ea44c3085f6/frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a", size = 265466 }, + { url = "https://files.pythonhosted.org/packages/0b/f2/b8158a0f06faefec33f4dff6345a575c18095a44e52d4f10c678c137d0e0/frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd", size = 281530 }, + { url = "https://files.pythonhosted.org/packages/ea/a2/20882c251e61be653764038ece62029bfb34bd5b842724fff32a5b7a2894/frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6", size = 281295 }, + { url = 
"https://files.pythonhosted.org/packages/4c/f9/8894c05dc927af2a09663bdf31914d4fb5501653f240a5bbaf1e88cab1d3/frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1", size = 268054 }, + { url = "https://files.pythonhosted.org/packages/37/ff/a613e58452b60166507d731812f3be253eb1229808e59980f0405d1eafbf/frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b", size = 286904 }, + { url = "https://files.pythonhosted.org/packages/cc/6e/0091d785187f4c2020d5245796d04213f2261ad097e0c1cf35c44317d517/frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e", size = 290754 }, + { url = "https://files.pythonhosted.org/packages/a5/c2/e42ad54bae8bcffee22d1e12a8ee6c7717f7d5b5019261a8c861854f4776/frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8", size = 282602 }, + { url = "https://files.pythonhosted.org/packages/b6/61/56bad8cb94f0357c4bc134acc30822e90e203b5cb8ff82179947de90c17f/frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89", size = 44063 }, + { url = "https://files.pythonhosted.org/packages/3e/dc/96647994a013bc72f3d453abab18340b7f5e222b7b7291e3697ca1fcfbd5/frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5", size = 50452 }, + { url = "https://files.pythonhosted.org/packages/83/10/466fe96dae1bff622021ee687f68e5524d6392b0a2f80d05001cd3a451ba/frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7", size = 11552 }, +] + +[[package]] +name = "fsspec" +version = "2024.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/90/b6/eba5024a9889fcfff396db543a34bef0ab9d002278f163129f9f01005960/fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49", size = 284584 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/44/73bea497ac69bafde2ee4269292fa3b41f1198f4bb7bbaaabde30ad29d4a/fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e", size = 177561 }, +] + [[package]] name = "gitdb" version = "4.0.11" @@ -355,6 +689,62 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/53/80/3d94d5999b4179d91bcc93745d1b0815b073d61be79dd546b840d17adb18/greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2", size = 293635 }, ] +[[package]] +name = "h11" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/b0/5e8b8674f8d203335a62fdfcfa0d11ebe09e23613c3391033cbba35f7926/httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61", size = 83234 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/d4/e5d7e4f2174f8a4d63c8897d79eb8fe2503f7ecc03282fee1fa2719c2704/httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5", size = 77926 }, +] + +[[package]] +name = "httpx" +version = "0.27.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, + { name = "sniffio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/82/08f8c936781f67d9e6b9eeb8a0c8b4e406136ea4c3d1f89a5db71d42e0e6/httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2", size = 144189 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/95/9377bcb415797e44274b51d46e3249eba641711cf3348050f76ee7b15ffc/httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0", size = 76395 }, +] + +[[package]] +name = "huggingface-hub" +version = "0.24.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/65/24/b98fce967b7d63700e5805b915012ba25bb538a81fcf11e97f3cc3f4f012/huggingface_hub-0.24.6.tar.gz", hash = "sha256:cc2579e761d070713eaa9c323e3debe39d5b464ae3a7261c39a9195b27bb8000", size = 349200 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/8f/d6718641c14d98a5848c6a24d2376028d292074ffade0702940a4b1dde76/huggingface_hub-0.24.6-py3-none-any.whl", hash = "sha256:a990f3232aa985fe749bc9474060cbad75e8b2f115f6665a9fda5b9c97818970", size = 417509 }, +] + [[package]] name = "humanize" version = "4.10.0" @@ -382,6 +772,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/22/7e/d71db821f177828df9dea8c42ac46473366f191be53080e552e628aad991/idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac", size = 66894 }, ] +[[package]] +name = "importlib-metadata" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/bd/fa8ce65b0a7d4b6d143ec23b0f5fd3f7ab80121078c465bc02baeaab22dc/importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5", size = 54320 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269 }, +] + [[package]] name = "iniconfig" version = "2.0.0" @@ -447,6 +849,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/9f/bc63f0f0737ad7a60800bfd472a4836661adae21f9c2535f3957b1e54ceb/jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0", size = 1569361 }, ] +[[package]] +name = "jinja2" +version = "3.1.4" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/55/39036716d19cab0747a5020fc7e907f362fbf48c984b14e62127f7e68e5d/jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", size = 240245 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/80/3a54838c3fb461f6fec263ebf3a3a41771bd05190238de3486aae8540c36/jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d", size = 133271 }, +] + [[package]] name = "jinxed" version = "1.3.0" @@ -459,6 +873,38 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/27/e3/0e0014d6ab159d48189e92044ace13b1e1fe9aa3024ba9f4e8cf172aa7c2/jinxed-1.3.0-py2.py3-none-any.whl", hash = "sha256:b993189f39dc2d7504d802152671535b06d380b26d78070559551cbf92df4fc5", size = 33085 }, ] +[[package]] +name = "jiter" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/1a/aa64be757afc614484b370a4d9fc1747dc9237b37ce464f7f9d9ca2a3d38/jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a", size = 158300 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/5f/3ac960ed598726aae46edea916e6df4df7ff6fe084bc60774b95cf3154e6/jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553", size = 284131 }, + { url = "https://files.pythonhosted.org/packages/03/eb/2308fa5f5c14c97c4c7720fef9465f1fa0771826cddb4eec9866bdd88846/jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3", size = 299310 }, + { url = "https://files.pythonhosted.org/packages/3c/f6/dba34ca10b44715fa5302b8e8d2113f72eb00a9297ddf3fa0ae4fd22d1d1/jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6", size = 332282 }, + { url = "https://files.pythonhosted.org/packages/69/f7/64e0a7439790ec47f7681adb3871c9d9c45fff771102490bbee5e92c00b7/jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4", size = 342370 }, + { url = "https://files.pythonhosted.org/packages/55/31/1efbfff2ae8e4d919144c53db19b828049ad0622a670be3bbea94a86282c/jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9", size = 363591 }, + { url = "https://files.pythonhosted.org/packages/30/c3/7ab2ca2276426a7398c6dfb651e38dbc81954c79a3bfbc36c514d8599499/jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614", size = 378551 }, + { url = "https://files.pythonhosted.org/packages/47/e7/5d88031cd743c62199b125181a591b1671df3ff2f6e102df85c58d8f7d31/jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e", size = 319152 }, + { url = "https://files.pythonhosted.org/packages/4c/2d/09ea58e1adca9f0359f3d41ef44a1a18e59518d7c43a21f4ece9e72e28c0/jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06", size = 357377 }, + { url = 
"https://files.pythonhosted.org/packages/7d/2f/83ff1058cb56fc3ff73e0d3c6440703ddc9cdb7f759b00cfbde8228fc435/jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403", size = 511091 }, + { url = "https://files.pythonhosted.org/packages/ae/c9/4f85f97c9894382ab457382337aea0012711baaa17f2ed55c0ff25f3668a/jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646", size = 492948 }, + { url = "https://files.pythonhosted.org/packages/4d/f2/2e987e0eb465e064c5f52c2f29c8d955452e3b316746e326269263bfb1b7/jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb", size = 195183 }, + { url = "https://files.pythonhosted.org/packages/ab/59/05d1c3203c349b37c4dd28b02b9b4e5915a7bcbd9319173b4548a67d2e93/jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae", size = 191032 }, + { url = "https://files.pythonhosted.org/packages/aa/bd/c3950e2c478161e131bed8cb67c36aed418190e2a961a1c981e69954e54b/jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a", size = 283511 }, + { url = "https://files.pythonhosted.org/packages/80/1c/8ce58d8c37a589eeaaa5d07d131fd31043886f5e77ab50c00a66d869a361/jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df", size = 296974 }, + { url = "https://files.pythonhosted.org/packages/4d/b8/6faeff9eed8952bed93a77ea1cffae7b946795b88eafd1a60e87a67b09e0/jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248", size = 331897 }, + { url = "https://files.pythonhosted.org/packages/4f/54/1d9a2209b46d39ce6f0cef3ad87c462f9c50312ab84585e6bd5541292b35/jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544", size = 342962 }, + { url = "https://files.pythonhosted.org/packages/2a/de/90360be7fc54b2b4c2dfe79eb4ed1f659fce9c96682e6a0be4bbe71371f7/jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba", size = 363844 }, + { url = "https://files.pythonhosted.org/packages/ba/ad/ef32b173191b7a53ea8a6757b80723cba321f8469834825e8c71c96bde17/jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f", size = 378709 }, + { url = "https://files.pythonhosted.org/packages/07/de/353ce53743c0defbbbd652e89c106a97dbbac4eb42c95920b74b5056b93a/jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e", size = 319038 }, + { url = "https://files.pythonhosted.org/packages/3f/92/42d47310bf9530b9dece9e2d7c6d51cf419af5586ededaf5e66622d160e2/jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a", size = 357763 }, + { url = "https://files.pythonhosted.org/packages/bd/8c/2bb76a9a84474d48fdd133d3445db8a4413da4e87c23879d917e000a9d87/jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e", 
size = 511031 }, + { url = "https://files.pythonhosted.org/packages/33/4f/9f23d79c0795e0a8e56e7988e8785c2dcda27e0ed37977256d50c77c6a19/jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338", size = 493042 }, + { url = "https://files.pythonhosted.org/packages/df/67/8a4f975aa834b8aecdb6b131422390173928fd47f42f269dcc32034ab432/jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4", size = 195405 }, + { url = "https://files.pythonhosted.org/packages/15/81/296b1e25c43db67848728cdab34ac3eb5c5cbb4955ceb3f51ae60d4a5e3d/jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5", size = 189720 }, +] + [[package]] name = "junitparser" version = "3.2.0" @@ -468,51 +914,158 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5a/f9/321d566c9f2af81fdb4bb3d5900214116b47be9e26b82219da8b818d9da9/junitparser-3.2.0-py2.py3-none-any.whl", hash = "sha256:e14fdc0a999edfc15889b637390e8ef6ca09a49532416d3bd562857d42d4b96d", size = 13394 }, ] +[[package]] +name = "kiwisolver" +version = "1.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/bd/e54734b47fa63b52e34bad5b60e6842628b9a47c14254c5557f2a4b37b2e/kiwisolver-1.4.6.tar.gz", hash = "sha256:3cda29d601445e6aa11f80d90a9b8c2ae501650c55d7ad29829bd44499c9e7e0", size = 97171 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/9a/d22443947c1bfacff8596fe588ac5c3bf9e88a1b557c9e3fec12f1487133/kiwisolver-1.4.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45a5cb5abad1ad9c265eed7e058fefafeb7964565b93b397ba2f480faec8d674", size = 122442 }, + { url = "https://files.pythonhosted.org/packages/b8/70/888ce89a1a2b4fbeb141729a4ea73c7227e758a7a8e0f5f7b9a10c83e03a/kiwisolver-1.4.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7e52b2568c47fb4f54d17576954e02b1de156c85152f87283a99db9670fd18c0", size = 65755 }, + { url = "https://files.pythonhosted.org/packages/17/fb/8d7100e23b1130256bb480a19e2a99b9100c268c0ea4ce6ce42073301ef0/kiwisolver-1.4.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:00af95204100bc1d0f26e1ed52ec77d6e3da5c9b845c88d31875c164e4ba6c0c", size = 64311 }, + { url = "https://files.pythonhosted.org/packages/77/34/704e1e382a5b59097ed546d1093758e6c106eb4b5ffd9340704b6cb5414e/kiwisolver-1.4.6-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50ab1fedf86f3951a9e90a64edd15f598860ed60cd3664259756f097d527b5ae", size = 1334259 }, + { url = "https://files.pythonhosted.org/packages/2f/8d/63f912f0f7245cbcd0ee9956585509dc85cb764adbe8d06e342596f03536/kiwisolver-1.4.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc985766bf20141ce64baecc39fb9fedbce094b2b8de1bb62676b79328988e4", size = 1426582 }, + { url = "https://files.pythonhosted.org/packages/37/cb/ae78c5a007d3c71aecd6d008c9923b29e14ef3663ccc2bc05f5dde9f071c/kiwisolver-1.4.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1942a155c737a7c3835a957897f0cc9ebc0085b7a75d934d86aecb1b27b8873", size = 1541078 }, + { url = "https://files.pythonhosted.org/packages/bf/69/f21cdc8537c08e7cc2097361b1d7ad3c47192509dfca7848e42abe6f2dec/kiwisolver-1.4.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f464403e391724f8e7dff188d3fb77a85bd1273b3fdba182e6671abcc44434f8", size = 1470055 }, + { url = 
"https://files.pythonhosted.org/packages/79/25/b4e6929ee94dcb3144cacb2f427dc27aaec42aeb9367421b2efd5f771393/kiwisolver-1.4.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce5efe545eea86f52ec5a1185e5052815ea86778e8268bad71fa46433f7c0bef", size = 1426374 }, + { url = "https://files.pythonhosted.org/packages/40/dd/11ae8c3276e7f975f8c7e6117184cc3afe1ef5a71745456926104b0c468d/kiwisolver-1.4.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cb30165f71b7b3378668346e220c81d590593a3a1ff76428a53780310df03f35", size = 2222229 }, + { url = "https://files.pythonhosted.org/packages/67/28/46b28157c237e6c08d40843f4d1ccb74ef727c6c420ced5864e0501b5200/kiwisolver-1.4.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5a987f740e1c9964e614acb87ba1f014b4be760a341effc8dc789913d1840e6", size = 2368628 }, + { url = "https://files.pythonhosted.org/packages/33/77/8d876efef1921bd918a9dca177fe5e9adda372b4b19300101a38aca5558b/kiwisolver-1.4.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f2ceaa6d0450623d108956647ef19a1a28c7e07880f1171c932477308d44d80b", size = 2329025 }, + { url = "https://files.pythonhosted.org/packages/fe/55/d9637527165b65fc3281d52415aaa12a8526200362ebd8ababb1962149c5/kiwisolver-1.4.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:788cbf99738f18ae8a27b9d4d7314502b4b917005cfdacd1d6a59038332ae24d", size = 2468484 }, + { url = "https://files.pythonhosted.org/packages/b7/7d/2297d08507d54a6a73c6f4377d8b7a795b4715caeca571e8a494f527d120/kiwisolver-1.4.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2f6668678a6b9488a7f8a6320e1b1c6396d179a976472dbc08d1600d04119511", size = 2284076 }, + { url = "https://files.pythonhosted.org/packages/8f/5b/64bc3c75c269b8820d98120c9d41079bac3129d83eed1892c50c4d760d2d/kiwisolver-1.4.6-cp311-cp311-win32.whl", hash = "sha256:10a09a3e4213c2806bcfd2eb4edb756c557973d2cacf06873b18a247fce897da", size = 46643 }, + { url = "https://files.pythonhosted.org/packages/5f/40/2b03bc57af2e41da8fec1b30d16f733ee28b85888a5368688e99d5c664c2/kiwisolver-1.4.6-cp311-cp311-win_amd64.whl", hash = "sha256:683ffef2c51fdc54112dc610d06b59b88c21e23fb669b905da6d5bec80da1bde", size = 56023 }, + { url = "https://files.pythonhosted.org/packages/23/f1/ea9a7d24ad1f8c4144dd4eefb11d456f2aa90fc59a6f373b8de5f770f35a/kiwisolver-1.4.6-cp311-cp311-win_arm64.whl", hash = "sha256:3b852c7f0ed9a2fd339c228829bca0964233ed45de50aae3e87b72ca37d177f8", size = 48540 }, + { url = "https://files.pythonhosted.org/packages/81/84/8a5b628c6f2e661a48b296f335fdc1503f8ab5f49782db79217439853a34/kiwisolver-1.4.6-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:979df7e9334f6a3694ee9be8d42817e519ef6d155a16499714d082cf41296852", size = 121807 }, + { url = "https://files.pythonhosted.org/packages/7f/2f/92210b842c87a22df953727c2ba2ceb28827de3a47dc157701d4524885fa/kiwisolver-1.4.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50c9c6c42bb6ca231626d1182b9128e89c5ce3c64456f811ff0280deb42d7bfe", size = 65529 }, + { url = "https://files.pythonhosted.org/packages/51/8f/e1a0d4be4f549a2ef7a9dfa478b50ee92e9b9156e0194072c96f8b934757/kiwisolver-1.4.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ace86489e7951bd26329a589198d3875c3d48380f889c69d3eb254b506a80101", size = 63895 }, + { url = "https://files.pythonhosted.org/packages/6d/0a/b82266406429d3a114f861d9dbfe08eee9ba1475ea09a75cefc0ccbb5372/kiwisolver-1.4.6-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f94771988da902b475f78e85cf63c5c94392773b4a6494234d87c1b363b2fbc5", size = 1369291 }, + { url = "https://files.pythonhosted.org/packages/61/b5/2e68f99a97131bd06f7eeca5294541cde33d92af875f945cd4c1296c94a4/kiwisolver-1.4.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a62379eee430b1c477bb0a0bf6858a57c7c0dad9cee8b3144a5cb5d366c66a54", size = 1461449 }, + { url = "https://files.pythonhosted.org/packages/89/9e/47db05cdf2109a2e375c076fce6088c5fb493331225f8e588bd1992f287c/kiwisolver-1.4.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e99b97d69499a7414572c906fbc7ca312519f2e17999730129f6c4492786e953", size = 1579164 }, + { url = "https://files.pythonhosted.org/packages/1f/d7/b6d86dd194ef83a259e30e2356c3ed16b40f59053a13dd6295c2ab99816e/kiwisolver-1.4.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab93f58afe3a02922a343189404f24ed885564e6316649790240124b95ef1d6e", size = 1507308 }, + { url = "https://files.pythonhosted.org/packages/5a/5f/f011569a61d48b39bbf606d675de79c85effabc1fbd291a6396538a8cc87/kiwisolver-1.4.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34105f4460ba50fc18a16a8e77a5122f7affe075628763fda748ad0ec534c3ee", size = 1464190 }, + { url = "https://files.pythonhosted.org/packages/97/44/9adc480290c47e3a3f2bac4e1de2b3f164fef80c50b2e68d25f1503e36de/kiwisolver-1.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0081f85f0222620563409d4804c6567a930a45dafbe9674c7913fde131653992", size = 2247874 }, + { url = "https://files.pythonhosted.org/packages/5c/5a/0585de17a52af89689b9d79575317e54d5541bc8d0aa8449faf8b9fd58a2/kiwisolver-1.4.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:df2a4a7cc2e01991e039a792457751b601bdf30143ab5f23f9a1e58f20c875f4", size = 2404199 }, + { url = "https://files.pythonhosted.org/packages/05/9c/fb3c9c407415126e31918db329de230988e779c9205a7099d674e5f64775/kiwisolver-1.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1f401784df70ea2870e4e10adade66b5b06cb2c151bc2a8a414a1d10554e9a81", size = 2352457 }, + { url = "https://files.pythonhosted.org/packages/e2/e7/d7768d21801eb26dea144c264b4da10b995af279b017056c242c8f2ed5ac/kiwisolver-1.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:b19761c8c613b6d04c44f1a4797a144b44136f17ec009ccfb025e17b5698140c", size = 2501358 }, + { url = "https://files.pythonhosted.org/packages/92/47/fe6423e99e5e00fd8c014013bd146c019b299defee0d78a8fc8619c77d88/kiwisolver-1.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee7289430ded484cc2eff9d8ffcce58ed7fe2c26919321dbc0580322a49e0120", size = 2314119 }, + { url = "https://files.pythonhosted.org/packages/ee/90/d6ede870b381ae0386475c20327b1c8e52d3b3ced0c9a6d4f1bb067bfba2/kiwisolver-1.4.6-cp312-cp312-win32.whl", hash = "sha256:331b9d9f408e874ecf34bd79b79df8e099f0b1b351b8844609c1bfdc8d2d45b2", size = 46366 }, + { url = "https://files.pythonhosted.org/packages/6a/32/755f2db6c96102ebb7f078f617b17be18d5ba9ecf1423fec2194e5812451/kiwisolver-1.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:a9be95d086578b3ada61a4621c0e7ee5f456820bfdccc3329061fdeae1e31179", size = 55882 }, + { url = "https://files.pythonhosted.org/packages/85/86/1c4fb5c0cd90bd1048d7de3febc8f94cd015d8c523d96a7aff4fc0045b00/kiwisolver-1.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:773f2d87825779ab69196dfcf63e9d91043273421c6128c8d4ed82bc6316068f", size = 48533 }, + { url = 
"https://files.pythonhosted.org/packages/6d/16/de2395bfdc90626e8e7dc86f2349b0802d7e91d9cce4cd992c69ba2a8084/kiwisolver-1.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:140f376c22b5148453acff768cff19c34ebbd593126617018732ea1d9ce65547", size = 48533 }, +] + [[package]] name = "ldp" -version = "0.1.dev1+g713bff3.d20240903" +version = "0.1.dev6+g4bc481a.d20240904" source = { editable = "." } +dependencies = [ + { name = "aiofiles" }, + { name = "dm-tree" }, + { name = "fhaviary" }, + { name = "httpx" }, + { name = "litellm" }, + { name = "networkx", extra = ["default"] }, + { name = "numpy" }, + { name = "openai" }, + { name = "pydantic" }, + { name = "sqlalchemy" }, + { name = "tenacity" }, + { name = "tiktoken" }, + { name = "torch" }, + { name = "tqdm" }, + { name = "transformers" }, + { name = "usearch" }, +] + +[package.optional-dependencies] +monitor = [ + { name = "wandb" }, +] +server = [ + { name = "fastapi" }, +] +typing = [ + { name = "types-aiofiles" }, + { name = "types-tqdm" }, +] +visualization = [ + { name = "pydot" }, +] [package.dev-dependencies] dev = [ { name = "build" }, { name = "codeflash" }, + { name = "fastapi" }, + { name = "fhaviary", extra = ["xml"] }, { name = "ipython" }, + { name = "litellm" }, { name = "mypy" }, { name = "pre-commit" }, + { name = "pydot" }, { name = "pylint" }, { name = "pylint-pydantic" }, { name = "pytest" }, { name = "pytest-asyncio" }, + { name = "pytest-rerunfailures" }, { name = "pytest-subtests" }, { name = "pytest-sugar" }, { name = "pytest-timer", extra = ["colorama"] }, { name = "pytest-xdist" }, { name = "refurb" }, { name = "sqlalchemy", extra = ["aiosqlite"] }, - { name = "typeguard" }, + { name = "types-aiofiles" }, + { name = "types-tqdm" }, + { name = "wandb" }, ] [package.metadata] +requires-dist = [ + { name = "aiofiles" }, + { name = "dm-tree" }, + { name = "fastapi", marker = "extra == 'server'", specifier = ">=0.109" }, + { name = "fhaviary" }, + { name = "httpx" }, + { name = "litellm" }, + { name = "networkx", extras = ["default"], specifier = "~=3.0" }, + { name = "numpy" }, + { name = "openai", specifier = ">=1" }, + { name = "pydantic", specifier = "~=2.0" }, + { name = "pydot", marker = "extra == 'visualization'", specifier = "~=2.0" }, + { name = "sqlalchemy", specifier = "~=2.0" }, + { name = "tenacity" }, + { name = "tiktoken" }, + { name = "torch" }, + { name = "tqdm" }, + { name = "transformers" }, + { name = "types-aiofiles", marker = "extra == 'typing'" }, + { name = "types-tqdm", marker = "extra == 'typing'" }, + { name = "usearch", specifier = ">=2.13" }, + { name = "wandb", marker = "extra == 'monitor'" }, +] [package.metadata.requires-dev] dev = [ { name = "build" }, { name = "codeflash" }, + { name = "fhaviary", extras = ["xml"] }, { name = "ipython", specifier = ">=8" }, + { name = "ldp", extras = ["monitor", "server", "typing", "visualization"] }, + { name = "litellm", specifier = ">=1.40.9,<=1.40.12" }, { name = "mypy", specifier = ">=1.8" }, { name = "pre-commit", specifier = "~=3.4" }, { name = "pylint", specifier = ">=3.2" }, { name = "pylint-pydantic" }, { name = "pytest", specifier = ">=8" }, { name = "pytest-asyncio" }, + { name = "pytest-rerunfailures" }, { name = "pytest-subtests" }, { name = "pytest-sugar" }, { name = "pytest-timer", extras = ["colorama"] }, { name = "pytest-xdist" }, { name = "refurb", specifier = ">=2" }, { name = "sqlalchemy", extras = ["aiosqlite"], specifier = "~=2.0" }, - { name = "typeguard" }, ] [[package]] @@ -538,6 +1091,26 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/04/32/7345f10a2dc728015920d689d5c1b8dc0232db321e172cdad2611e73c5b3/libcst-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:62e2682ee1567b6a89c91853865372bf34f178bfd237853d84df2b87b446e654", size = 2026263 }, ] +[[package]] +name = "litellm" +version = "1.40.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "click" }, + { name = "importlib-metadata" }, + { name = "jinja2" }, + { name = "openai" }, + { name = "python-dotenv" }, + { name = "requests" }, + { name = "tiktoken" }, + { name = "tokenizers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/47/6c/ac51d9cf596da82d07bff5ff90f1c8f4897f8c758ea0666ef207e8d63b38/litellm-1.40.12.tar.gz", hash = "sha256:366bb9c3694b9ef59b3d073bb37ff9ca175ab4090dc187b0a11d2b21db3a6a5d", size = 6043010 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/cd/90a633bf7738ccc70645f9c7b4734ae99397b5040e3e6fa03d7801152ff1/litellm-1.40.12-py3-none-any.whl", hash = "sha256:42f1648507f29c60543ba5fdf35d38fc161694da043b201508225bae50d3328c", size = 6258659 }, +] + [[package]] name = "lxml" version = "5.3.0" @@ -597,6 +1170,76 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, ] +[[package]] +name = "markupsafe" +version = "2.1.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/5b/aae44c6655f3801e81aa3eef09dbbf012431987ba564d7231722f68df02d/MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", size = 19384 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/e7/291e55127bb2ae67c64d66cef01432b5933859dfb7d6949daa721b89d0b3/MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", size = 18219 }, + { url = "https://files.pythonhosted.org/packages/6b/cb/aed7a284c00dfa7c0682d14df85ad4955a350a21d2e3b06d8240497359bf/MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", size = 14098 }, + { url = "https://files.pythonhosted.org/packages/1c/cf/35fe557e53709e93feb65575c93927942087e9b97213eabc3fe9d5b25a55/MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", size = 29014 }, + { url = "https://files.pythonhosted.org/packages/97/18/c30da5e7a0e7f4603abfc6780574131221d9148f323752c2755d48abad30/MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", size = 28220 }, + { url = "https://files.pythonhosted.org/packages/0c/40/2e73e7d532d030b1e41180807a80d564eda53babaf04d65e15c1cf897e40/MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", size = 27756 }, + { url = "https://files.pythonhosted.org/packages/18/46/5dca760547e8c59c5311b332f70605d24c99d1303dd9a6e1fc3ed0d73561/MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", size = 33988 
}, + { url = "https://files.pythonhosted.org/packages/6d/c5/27febe918ac36397919cd4a67d5579cbbfa8da027fa1238af6285bb368ea/MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", size = 32718 }, + { url = "https://files.pythonhosted.org/packages/f8/81/56e567126a2c2bc2684d6391332e357589a96a76cb9f8e5052d85cb0ead8/MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", size = 33317 }, + { url = "https://files.pythonhosted.org/packages/00/0b/23f4b2470accb53285c613a3ab9ec19dc944eaf53592cb6d9e2af8aa24cc/MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", size = 16670 }, + { url = "https://files.pythonhosted.org/packages/b7/a2/c78a06a9ec6d04b3445a949615c4c7ed86a0b2eb68e44e7541b9d57067cc/MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", size = 17224 }, + { url = "https://files.pythonhosted.org/packages/53/bd/583bf3e4c8d6a321938c13f49d44024dbe5ed63e0a7ba127e454a66da974/MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", size = 18215 }, + { url = "https://files.pythonhosted.org/packages/48/d6/e7cd795fc710292c3af3a06d80868ce4b02bfbbf370b7cee11d282815a2a/MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", size = 14069 }, + { url = "https://files.pythonhosted.org/packages/51/b5/5d8ec796e2a08fc814a2c7d2584b55f889a55cf17dd1a90f2beb70744e5c/MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", size = 29452 }, + { url = "https://files.pythonhosted.org/packages/0a/0d/2454f072fae3b5a137c119abf15465d1771319dfe9e4acbb31722a0fff91/MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", size = 28462 }, + { url = "https://files.pythonhosted.org/packages/2d/75/fd6cb2e68780f72d47e6671840ca517bda5ef663d30ada7616b0462ad1e3/MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", size = 27869 }, + { url = "https://files.pythonhosted.org/packages/b0/81/147c477391c2750e8fc7705829f7351cf1cd3be64406edcf900dc633feb2/MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", size = 33906 }, + { url = "https://files.pythonhosted.org/packages/8b/ff/9a52b71839d7a256b563e85d11050e307121000dcebc97df120176b3ad93/MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", size = 32296 }, + { url = "https://files.pythonhosted.org/packages/88/07/2dc76aa51b481eb96a4c3198894f38b480490e834479611a4053fbf08623/MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", size = 33038 }, + { url = "https://files.pythonhosted.org/packages/96/0c/620c1fb3661858c0e37eb3cbffd8c6f732a67cd97296f725789679801b31/MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = 
"sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", size = 16572 }, + { url = "https://files.pythonhosted.org/packages/3f/14/c3554d512d5f9100a95e737502f4a2323a1959f6d0d01e0d0997b35f7b10/MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", size = 17127 }, +] + +[[package]] +name = "matplotlib" +version = "3.9.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = "fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/d8/3d7f706c69e024d4287c1110d74f7dabac91d9843b99eadc90de9efc8869/matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92", size = 36088381 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/c2/f9d7fe80a8fcce9bb128d1381c6fe41a8d286d7e18395e273002e8e0fa34/matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772", size = 7902925 }, + { url = "https://files.pythonhosted.org/packages/28/ba/8be09886eb56ac04a218a1dc3fa728a5c4cac60b019b4f1687885166da00/matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41", size = 7773193 }, + { url = "https://files.pythonhosted.org/packages/e6/9a/5991972a560db3ab621312a7ca5efec339ae2122f25901c0846865c4b72f/matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f", size = 8202378 }, + { url = "https://files.pythonhosted.org/packages/01/75/6c7ce560e95714a10fcbb3367d1304975a1a3e620f72af28921b796403f3/matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447", size = 8314361 }, + { url = "https://files.pythonhosted.org/packages/6e/49/dc7384c6c092958e0b75e754efbd9e52500154939c3d715789cee9fb8a53/matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e", size = 9091428 }, + { url = "https://files.pythonhosted.org/packages/8b/ce/15b0bb2fb29b3d46211d8ca740b96b5232499fc49200b58b8d571292c9a6/matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7", size = 7829377 }, + { url = "https://files.pythonhosted.org/packages/82/de/54f7f38ce6de79cb77d513bb3eaa4e0b1031e9fd6022214f47943fa53a88/matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9", size = 7892511 }, + { url = "https://files.pythonhosted.org/packages/35/3e/5713b84a02b24b2a4bd4d6673bfc03017e6654e1d8793ece783b7ed4d484/matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d", size = 7769370 }, + { url = "https://files.pythonhosted.org/packages/5b/bd/c404502aa1824456d2862dd6b9b0c1917761a51a32f7f83ff8cf94b6d117/matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7", size = 8193260 }, + { url = 
"https://files.pythonhosted.org/packages/27/75/de5b9cd67648051cae40039da0c8cbc497a0d99acb1a1f3d087cd66d27b7/matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c", size = 8306310 }, + { url = "https://files.pythonhosted.org/packages/de/e3/2976e4e54d7ee76eaf54b7639fdc10a223d05c2bdded7045233e9871e469/matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e", size = 9086717 }, + { url = "https://files.pythonhosted.org/packages/d2/92/c2b9464a0562feb6ae780bdc152364810862e07ef5e6affa2b7686028db2/matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3", size = 7832805 }, + { url = "https://files.pythonhosted.org/packages/5c/7f/8932eac316b32f464b8f9069f151294dcd892c8fbde61fe8bcd7ba7f7f7e/matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9", size = 7893012 }, + { url = "https://files.pythonhosted.org/packages/90/89/9db9db3dd0ff3e2c49e452236dfe29e60b5586a88f8928ca1d153d0da8b5/matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa", size = 7769810 }, + { url = "https://files.pythonhosted.org/packages/67/26/d2661cdc2e1410b8929c5f12dfd521e4528abfed1b3c3d5a28ac48258b43/matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b", size = 8193779 }, + { url = "https://files.pythonhosted.org/packages/95/70/4839eaa672bf4eacc98ebc8d23633e02b6daf39e294e7433c4ab11a689be/matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413", size = 8306260 }, + { url = "https://files.pythonhosted.org/packages/88/62/7b263b2cb2724b45d3a4f9c8c6137696cc3ef037d44383fb01ac2a9555c2/matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b", size = 9086073 }, + { url = "https://files.pythonhosted.org/packages/b0/6d/3572fe243c74112fef120f0bc86f5edd21f49b60e8322fc7f6a01fe945dd/matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49", size = 7833041 }, + { url = "https://files.pythonhosted.org/packages/03/8f/9d505be3eb2f40ec731674fb6b47d10cc3147bbd6a9ea7a08c8da55415c6/matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03", size = 7933657 }, + { url = "https://files.pythonhosted.org/packages/5d/68/44b458b9794bcff2a66921f8c9a8110a50a0bb099bd5f7cabb428a1dc765/matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30", size = 7799276 }, + { url = "https://files.pythonhosted.org/packages/47/79/8486d4ddcaaf676314b5fb58e8fe19d1a6210a443a7c31fa72d4215fcb87/matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51", size = 8221027 }, + { url = "https://files.pythonhosted.org/packages/56/62/72a472181578c3d035dcda0d0fa2e259ba2c4cb91132588a348bb705b70d/matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c", size = 8329097 }, + { url = "https://files.pythonhosted.org/packages/01/8a/760f7fce66b39f447ad160800619d0bd5d0936d2b4633587116534a4afe0/matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e", size = 9093770 }, +] + [[package]] name = "matplotlib-inline" version = "0.1.7" @@ -627,6 +1270,54 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c", size = 8154 }, ] +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 }, +] + +[[package]] +name = "multidict" +version = "6.0.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/79/722ca999a3a09a63b35aac12ec27dfa8e5bb3a38b0f857f7a1a209a88836/multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da", size = 59867 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/da/b10ea65b850b54f44a6479177c6987f456bc2d38f8dc73009b78afcf0ede/multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba", size = 50815 }, + { url = "https://files.pythonhosted.org/packages/21/db/3403263f158b0bc7b0d4653766d71cb39498973f2042eead27b2e9758782/multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e", size = 30269 }, + { url = "https://files.pythonhosted.org/packages/02/c1/b15ecceb6ffa5081ed2ed450aea58d65b0e0358001f2b426705f9f41f4c2/multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd", size = 30500 }, + { url = "https://files.pythonhosted.org/packages/3f/e1/7fdd0f39565df3af87d6c2903fb66a7d529fbd0a8a066045d7a5b6ad1145/multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3", size = 130751 }, + { url = "https://files.pythonhosted.org/packages/76/bc/9f593f9e38c6c09bbf0344b56ad67dd53c69167937c2edadee9719a5e17d/multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf", size = 138185 }, + { url = "https://files.pythonhosted.org/packages/28/32/d7799a208701d537b92705f46c777ded812a6dc139c18d8ed599908f6b1c/multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29", size = 133585 }, + { url = 
"https://files.pythonhosted.org/packages/52/ec/be54a3ad110f386d5bd7a9a42a4ff36b3cd723ebe597f41073a73ffa16b8/multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed", size = 128684 }, + { url = "https://files.pythonhosted.org/packages/36/e1/a680eabeb71e25d4733276d917658dfa1cd3a99b1223625dbc247d266c98/multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733", size = 120994 }, + { url = "https://files.pythonhosted.org/packages/ef/08/08f4f44a8a43ea4cee13aa9cdbbf4a639af8db49310a0637ca389c4cf817/multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f", size = 159689 }, + { url = "https://files.pythonhosted.org/packages/aa/a9/46cdb4cb40bbd4b732169413f56b04a6553460b22bd914f9729c9ba63761/multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4", size = 150611 }, + { url = "https://files.pythonhosted.org/packages/e9/32/35668bb3e6ab2f12f4e4f7f4000f72f714882a94f904d4c3633fbd036753/multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1", size = 164444 }, + { url = "https://files.pythonhosted.org/packages/fa/10/f1388a91552af732d8ec48dab928abc209e732767e9e8f92d24c3544353c/multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc", size = 160158 }, + { url = "https://files.pythonhosted.org/packages/14/c3/f602601f1819983e018156e728e57b3f19726cb424b543667faab82f6939/multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e", size = 156072 }, + { url = "https://files.pythonhosted.org/packages/82/a6/0290af8487326108c0d03d14f8a0b8b1001d71e4494df5f96ab0c88c0b88/multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c", size = 25731 }, + { url = "https://files.pythonhosted.org/packages/88/aa/ea217cb18325aa05cb3e3111c19715f1e97c50a4a900cbc20e54648de5f5/multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea", size = 28176 }, + { url = "https://files.pythonhosted.org/packages/90/9c/7fda9c0defa09538c97b1f195394be82a1f53238536f70b32eb5399dfd4e/multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e", size = 49575 }, + { url = "https://files.pythonhosted.org/packages/be/21/d6ca80dd1b9b2c5605ff7475699a8ff5dc6ea958cd71fb2ff234afc13d79/multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b", size = 29638 }, + { url = "https://files.pythonhosted.org/packages/9c/18/9565f32c19d186168731e859692dfbc0e98f66a1dcf9e14d69c02a78b75a/multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5", size = 29874 }, + { url = "https://files.pythonhosted.org/packages/4e/4e/3815190e73e6ef101b5681c174c541bf972a1b064e926e56eea78d06e858/multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450", size = 129914 }, + { url = "https://files.pythonhosted.org/packages/0c/08/bb47f886457e2259aefc10044e45c8a1b62f0c27228557e17775869d0341/multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496", size = 134589 }, + { url = "https://files.pythonhosted.org/packages/d5/2f/952f79b5f0795cf4e34852fc5cf4dfda6166f63c06c798361215b69c131d/multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a", size = 133259 }, + { url = "https://files.pythonhosted.org/packages/24/1f/af976383b0b772dd351210af5b60ff9927e3abb2f4a103e93da19a957da0/multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226", size = 130779 }, + { url = "https://files.pythonhosted.org/packages/fc/b1/b0a7744be00b0f5045c7ed4e4a6b8ee6bde4672b2c620474712299df5979/multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271", size = 120125 }, + { url = "https://files.pythonhosted.org/packages/d0/bf/2a1d667acf11231cdf0b97a6cd9f30e7a5cf847037b5cf6da44884284bd0/multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb", size = 167095 }, + { url = "https://files.pythonhosted.org/packages/5e/e8/ad6ee74b1a2050d3bc78f566dabcc14c8bf89cbe87eecec866c011479815/multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef", size = 155823 }, + { url = "https://files.pythonhosted.org/packages/45/7c/06926bb91752c52abca3edbfefac1ea90d9d1bc00c84d0658c137589b920/multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24", size = 170233 }, + { url = "https://files.pythonhosted.org/packages/3c/29/3dd36cf6b9c5abba8b97bba84eb499a168ba59c3faec8829327b3887d123/multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6", size = 169035 }, + { url = "https://files.pythonhosted.org/packages/60/47/9a0f43470c70bbf6e148311f78ef5a3d4996b0226b6d295bdd50fdcfe387/multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda", size = 166229 }, + { url = "https://files.pythonhosted.org/packages/1d/23/c1b7ae7a0b8a3e08225284ef3ecbcf014b292a3ee821bc4ed2185fd4ce7d/multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5", size = 25840 }, + { url = "https://files.pythonhosted.org/packages/4a/68/66fceb758ad7a88993940dbdf3ac59911ba9dc46d7798bf6c8652f89f853/multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556", size = 27905 }, + { url = "https://files.pythonhosted.org/packages/fa/a2/17e1e23c6be0a916219c5292f509360c345b5fa6beeb50d743203c27532c/multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7", size = 9729 }, +] + [[package]] name = "mypy" version = "1.11.2" @@ -659,6 +1350,23 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 }, ] +[[package]] +name = "networkx" +version = "3.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/e6/b164f94c869d6b2c605b5128b7b0cfe912795a87fc90e78533920001f3ec/networkx-3.3.tar.gz", hash = "sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9", size = 2126579 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/e9/5f72929373e1a0e8d142a130f3f97e6ff920070f87f91c4e13e40e0fba5a/networkx-3.3-py3-none-any.whl", hash = "sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2", size = 1702396 }, +] + +[package.optional-dependencies] +default = [ + { name = "matplotlib" }, + { name = "numpy" }, + { name = "pandas" }, + { name = "scipy" }, +] + [[package]] name = "nodeenv" version = "1.9.1" @@ -668,6 +1376,191 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 }, ] +[[package]] +name = "numpy" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/59/5f/9003bb3e632f2b58f5e3a3378902dcc73c5518070736c6740fe52454e8e1/numpy-2.1.1.tar.gz", hash = "sha256:d0cf7d55b1051387807405b3898efafa862997b4cba8aa5dbe657be794afeafd", size = 18874860 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/86/2c01070424a42b286ea0271203682c3d3e81e10ce695545b35768307b383/numpy-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0d07841fd284718feffe7dd17a63a2e6c78679b2d386d3e82f44f0108c905550", size = 21154850 }, + { url = "https://files.pythonhosted.org/packages/ef/4e/d3426d9e620a18bbb979f28e4dc7f9a2c35eb7cf726ffcb33545ebdd3e6a/numpy-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b5613cfeb1adfe791e8e681128f5f49f22f3fcaa942255a6124d58ca59d9528f", size = 13789477 }, + { url = "https://files.pythonhosted.org/packages/c6/6e/fb6b1b2da9f4c757f55b202f10b6af0fe4fee87ace6e830228a12ab8ae5d/numpy-2.1.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:0b8cc2715a84b7c3b161f9ebbd942740aaed913584cae9cdc7f8ad5ad41943d0", size = 5351769 }, + { url = "https://files.pythonhosted.org/packages/58/9a/07c8a9dc7254f3265ae014e33768d1cfd8eb73ee6cf215f4ec3b497e4255/numpy-2.1.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b49742cdb85f1f81e4dc1b39dcf328244f4d8d1ded95dea725b316bd2cf18c95", size = 6890872 }, + { url = "https://files.pythonhosted.org/packages/08/4e/3b50fa3b1e045793056ed5a1fc6f89dd897ff9cb00900ca6377fe552d442/numpy-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8d5f8a8e3bc87334f025194c6193e408903d21ebaeb10952264943a985066ca", size = 13984256 }, + { url = "https://files.pythonhosted.org/packages/d9/37/108d692f7e2544b9ae972c7bfa06c26717871c273ccec86470bc3132b04d/numpy-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d51fc141ddbe3f919e91a096ec739f49d686df8af254b2053ba21a910ae518bf", size = 16337778 }, + { url = "https://files.pythonhosted.org/packages/95/2d/df81a1be3be6d3a92fd12dfd6c26a0dc026b276136ec1056562342a484a2/numpy-2.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:98ce7fb5b8063cfdd86596b9c762bf2b5e35a2cdd7e967494ab78a1fa7f8b86e", size = 16710448 }, + { url = "https://files.pythonhosted.org/packages/8f/34/4b2e604c5c44bd64b6c85e89d88871b41e60233b3ddf97419b37ae5b0c72/numpy-2.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:24c2ad697bd8593887b019817ddd9974a7f429c14a5469d7fad413f28340a6d2", size = 14489002 }, + { url = "https://files.pythonhosted.org/packages/9f/0d/67c04b6bfefd0abbe7f60f7e4f11e3aca15d688faec1d1df089966105a9a/numpy-2.1.1-cp311-cp311-win32.whl", hash = "sha256:397bc5ce62d3fb73f304bec332171535c187e0643e176a6e9421a6e3eacef06d", size = 6533215 }, + { url = "https://files.pythonhosted.org/packages/94/7a/4c00332a3ca79702bbc86228afd0e84e6f91b47222ec8cdf00677dd16481/numpy-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:ae8ce252404cdd4de56dcfce8b11eac3c594a9c16c231d081fb705cf23bd4d9e", size = 12870550 }, + { url = "https://files.pythonhosted.org/packages/36/11/c573ef66c004f991989c2c6218229d9003164525549409aec5ec9afc0285/numpy-2.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c803b7934a7f59563db459292e6aa078bb38b7ab1446ca38dd138646a38203e", size = 20884403 }, + { url = "https://files.pythonhosted.org/packages/6b/6c/a9fbef5fd2f9685212af2a9e47485cde9357c3e303e079ccf85127516f2d/numpy-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6435c48250c12f001920f0751fe50c0348f5f240852cfddc5e2f97e007544cbe", size = 13493375 }, + { url = "https://files.pythonhosted.org/packages/34/f2/1316a6b08ad4c161d793abe81ff7181e9ae2e357a5b06352a383b9f8e800/numpy-2.1.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3269c9eb8745e8d975980b3a7411a98976824e1fdef11f0aacf76147f662b15f", size = 5088823 }, + { url = "https://files.pythonhosted.org/packages/be/15/fabf78a6d4a10c250e87daf1cd901af05e71501380532ac508879cc46a7e/numpy-2.1.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:fac6e277a41163d27dfab5f4ec1f7a83fac94e170665a4a50191b545721c6521", size = 6619825 }, + { url = "https://files.pythonhosted.org/packages/9f/8a/76ddef3e621541ddd6984bc24d256a4e3422d036790cbbe449e6cad439ee/numpy-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcd8f556cdc8cfe35e70efb92463082b7f43dd7e547eb071ffc36abc0ca4699b", size = 13696705 }, + { url = "https://files.pythonhosted.org/packages/cb/22/2b840d297183916a95847c11f82ae11e248fa98113490b2357f774651e1d/numpy-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b9cd92c8f8e7b313b80e93cedc12c0112088541dcedd9197b5dee3738c1201", size = 16041649 }, + { url = "https://files.pythonhosted.org/packages/c7/e8/6f4825d8f576cfd5e4d6515b9eec22bd618868bdafc8a8c08b446dcb65f0/numpy-2.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:afd9c680df4de71cd58582b51e88a61feed4abcc7530bcd3d48483f20fc76f2a", size = 16409358 }, + { url = "https://files.pythonhosted.org/packages/bf/f8/5edf1105b0dc24fd66fc3e9e7f3bca3d920cde571caaa4375ec1566073c3/numpy-2.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8661c94e3aad18e1ea17a11f60f843a4933ccaf1a25a7c6a9182af70610b2313", size = 14172488 }, + { url = "https://files.pythonhosted.org/packages/f4/c2/dddca3e69a024d2f249a5b68698328163cbdafb7e65fbf6d36373bbabf12/numpy-2.1.1-cp312-cp312-win32.whl", hash = "sha256:950802d17a33c07cba7fd7c3dcfa7d64705509206be1606f196d179e539111ed", size = 6237195 }, + { url = "https://files.pythonhosted.org/packages/b7/98/5640a09daa3abf0caeaefa6e7bf0d10c0aa28a77c84e507d6a716e0e23df/numpy-2.1.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:3fc5eabfc720db95d68e6646e88f8b399bfedd235994016351b1d9e062c4b270", size = 12568082 }, + { url = "https://files.pythonhosted.org/packages/6b/9e/8bc6f133bc6d359ccc9ec051853aded45504d217685191f31f46d36b7065/numpy-2.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:046356b19d7ad1890c751b99acad5e82dc4a02232013bd9a9a712fddf8eb60f5", size = 20834810 }, + { url = "https://files.pythonhosted.org/packages/32/1b/429519a2fa28681814c511574017d35f3aab7136d554cc65f4c1526dfbf5/numpy-2.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6e5a9cb2be39350ae6c8f79410744e80154df658d5bea06e06e0ac5bb75480d5", size = 13507739 }, + { url = "https://files.pythonhosted.org/packages/25/18/c732d7dd9896d11e4afcd487ac65e62f9fa0495563b7614eb850765361fa/numpy-2.1.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:d4c57b68c8ef5e1ebf47238e99bf27657511ec3f071c465f6b1bccbef12d4136", size = 5074465 }, + { url = "https://files.pythonhosted.org/packages/3e/37/838b7ae9262c370ab25312bab365492016f11810ffc03ebebbd54670b669/numpy-2.1.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:8ae0fd135e0b157365ac7cc31fff27f07a5572bdfc38f9c2d43b2aff416cc8b0", size = 6606418 }, + { url = "https://files.pythonhosted.org/packages/8b/b9/7ff3bfb71e316a5b43a124c4b7a5881ab12f3c32636014bef1f757f19dbd/numpy-2.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981707f6b31b59c0c24bcda52e5605f9701cb46da4b86c2e8023656ad3e833cb", size = 13692464 }, + { url = "https://files.pythonhosted.org/packages/42/78/75bcf16e6737cd196ff7ecf0e1fd3f953293a34dff4fd93fb488e8308536/numpy-2.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ca4b53e1e0b279142113b8c5eb7d7a877e967c306edc34f3b58e9be12fda8df", size = 16037763 }, + { url = "https://files.pythonhosted.org/packages/23/99/36bf5ffe034d06df307bc783e25cf164775863166dcd878879559fe0379f/numpy-2.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e097507396c0be4e547ff15b13dc3866f45f3680f789c1a1301b07dadd3fbc78", size = 16410374 }, + { url = "https://files.pythonhosted.org/packages/7f/16/04c5dab564887d4cd31a9ed30e51467fa70d52a4425f5a9bd1eed5b3d34c/numpy-2.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7506387e191fe8cdb267f912469a3cccc538ab108471291636a96a54e599556", size = 14169873 }, + { url = "https://files.pythonhosted.org/packages/09/e0/d1b5adbf1731886c4186c59a9fa208585df9452a43a2b60e79af7c649717/numpy-2.1.1-cp313-cp313-win32.whl", hash = "sha256:251105b7c42abe40e3a689881e1793370cc9724ad50d64b30b358bbb3a97553b", size = 6234118 }, + { url = "https://files.pythonhosted.org/packages/d0/9c/2391ee6e9ebe77232ddcab29d92662b545e99d78c3eb3b4e26d59b9ca1ca/numpy-2.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:f212d4f46b67ff604d11fff7cc62d36b3e8714edf68e44e9760e19be38c03eb0", size = 12561742 }, + { url = "https://files.pythonhosted.org/packages/38/0e/c4f754f9e73f9bb520e8bf418c646f2c4f70c5d5f2bc561e90f884593193/numpy-2.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:920b0911bb2e4414c50e55bd658baeb78281a47feeb064ab40c2b66ecba85553", size = 20858403 }, + { url = "https://files.pythonhosted.org/packages/32/fc/d69092b9171efa0cb8079577e71ce0cac0e08f917d33f6e99c916ed51d44/numpy-2.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:bab7c09454460a487e631ffc0c42057e3d8f2a9ddccd1e60c7bb8ed774992480", size = 13519851 }, + { url = "https://files.pythonhosted.org/packages/14/2a/d7cf2cd9f15b23f623075546ea64a2c367cab703338ca22aaaecf7e704df/numpy-2.1.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = 
"sha256:cea427d1350f3fd0d2818ce7350095c1a2ee33e30961d2f0fef48576ddbbe90f", size = 5115444 }, + { url = "https://files.pythonhosted.org/packages/8e/00/e87b2cb4afcecca3b678deefb8fa53005d7054f3b5c39596e5554e5d98f8/numpy-2.1.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:e30356d530528a42eeba51420ae8bf6c6c09559051887196599d96ee5f536468", size = 6628903 }, + { url = "https://files.pythonhosted.org/packages/ab/9d/337ae8721b3beec48c3413d71f2d44b2defbf3c6f7a85184fc18b7b61f4a/numpy-2.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8dfa9e94fc127c40979c3eacbae1e61fda4fe71d84869cc129e2721973231ef", size = 13665945 }, + { url = "https://files.pythonhosted.org/packages/c0/90/ee8668e84c5d5cc080ef3beb622c016adf19ca3aa51afe9dbdcc6a9baf59/numpy-2.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910b47a6d0635ec1bd53b88f86120a52bf56dcc27b51f18c7b4a2e2224c29f0f", size = 16023473 }, + { url = "https://files.pythonhosted.org/packages/38/a0/57c24b2131879183051dc698fbb53fd43b77c3fa85b6e6311014f2bc2973/numpy-2.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:13cc11c00000848702322af4de0147ced365c81d66053a67c2e962a485b3717c", size = 16400624 }, + { url = "https://files.pythonhosted.org/packages/bb/4c/14a41eb5c9548c6cee6af0936eabfd985c69230ffa2f2598321431a9aa0a/numpy-2.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:53e27293b3a2b661c03f79aa51c3987492bd4641ef933e366e0f9f6c9bf257ec", size = 14155072 }, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/6d/121efd7382d5b0284239f4ab1fc1590d86d34ed4a4a2fdb13b30ca8e5740/nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728", size = 410594774 }, + { url = "https://files.pythonhosted.org/packages/c5/ef/32a375b74bea706c93deea5613552f7c9104f961b21df423f5887eca713b/nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906", size = 439918445 }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/00/6b218edd739ecfc60524e585ba8e6b00554dd908de2c9c66c1af3e44e18d/nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e", size = 14109015 }, + { url = "https://files.pythonhosted.org/packages/d0/56/0021e32ea2848c24242f6b56790bd0ccc8bf99f973ca790569c6ca028107/nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4", size = 10154340 }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/9f/c64c03f49d6fbc56196664d05dba14e3a561038a81a638eeb47f4d4cfd48/nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2", size = 23671734 }, + { url = "https://files.pythonhosted.org/packages/ad/1d/f76987c4f454eb86e0b9a0e4f57c3bf1ac1d13ad13cd1a4da4eb0e0c0ce9/nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed", 
size = 19331863 }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/d5/c68b1d2cdfcc59e72e8a5949a37ddb22ae6cade80cd4a57a84d4c8b55472/nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40", size = 823596 }, + { url = "https://files.pythonhosted.org/packages/9f/e2/7a2b4b5064af56ea8ea2d8b2776c0f2960d95c88716138806121ae52a9c9/nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344", size = 821226 }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.1.0.70" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 }, + { url = "https://files.pythonhosted.org/packages/3f/d0/f90ee6956a628f9f04bf467932c0a25e5a7e706a684b896593c06c82f460/nvidia_cudnn_cu12-9.1.0.70-py3-none-win_amd64.whl", hash = "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a", size = 679925892 }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/94/eb540db023ce1d162e7bea9f8f5aa781d57c65aed513c33ee9a5123ead4d/nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56", size = 121635161 }, + { url = "https://files.pythonhosted.org/packages/f7/57/7927a3aa0e19927dfed30256d1c854caf991655d847a4e7c01fe87e3d4ac/nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253", size = 121344196 }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/31/4890b1c9abc496303412947fc7dcea3d14861720642b49e8ceed89636705/nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0", size = 56467784 }, + { url = "https://files.pythonhosted.org/packages/5c/97/4c9c7c79efcdf5b70374241d48cf03b94ef6707fd18ea0c0f53684931d0b/nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a", size = 55995813 }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12" }, + { name = "nvidia-cusparse-cu12" }, + { name = "nvidia-nvjitlink-cu12" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/1d/8de1e5c67099015c834315e333911273a8c6aaba78923dd1d1e25fc5f217/nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd", size = 124161928 }, + { url = 
"https://files.pythonhosted.org/packages/b8/80/8fca0bf819122a631c3976b6fc517c1b10741b643b94046bd8dd451522c5/nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5", size = 121643081 }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/5b/cfaeebf25cd9fdec14338ccb16f6b2c4c7fa9163aefcf057d86b9cc248bb/nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c", size = 195958278 }, + { url = "https://files.pythonhosted.org/packages/0f/95/48fdbba24c93614d1ecd35bc6bdc6087bd17cbacc3abc4b05a9c2a1ca232/nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a", size = 195414588 }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.20.5" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/bb/d09dda47c881f9ff504afd6f9ca4f502ded6d8fc2f572cacc5e39da91c28/nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01", size = 176238458 }, + { url = "https://files.pythonhosted.org/packages/4b/2a/0a131f572aa09f741c30ccd45a8e56316e8be8dfc7bc19bf0ab7cfef7b19/nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56", size = 176249402 }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.6.68" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/8c/69c9e39cd6bfa813852a94e9bd3c075045e2707d163e9dc2326c82d2c330/nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_aarch64.whl", hash = "sha256:b3fd0779845f68b92063ab1393abab1ed0a23412fc520df79a8190d098b5cd6b", size = 19253287 }, + { url = "https://files.pythonhosted.org/packages/a8/48/a9775d377cb95585fb188b469387f58ba6738e268de22eae2ad4cedb2c41/nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_x86_64.whl", hash = "sha256:125a6c2a44e96386dda634e13d944e60b07a0402d391a070e8fb4104b34ea1ab", size = 19725597 }, + { url = "https://files.pythonhosted.org/packages/00/d5/02af3b39427ed71e8c40b6912271499ec186a72405bcb7e4ca26ff70678c/nvidia_nvjitlink_cu12-12.6.68-py3-none-win_amd64.whl", hash = "sha256:a55744c98d70317c5e23db14866a8cc2b733f7324509e941fc96276f9f37801d", size = 161730369 }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/d3/8057f0587683ed2fcd4dbfbdfdfa807b9160b809976099d36b8f60d08f03/nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5", size = 99138 }, + { url = "https://files.pythonhosted.org/packages/b8/d7/bd7cb2d95ac6ac6e8d05bfa96cdce69619f1ef2808e072919044c2d47a8c/nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82", size = 66307 }, +] + +[[package]] +name = "openai" +version = "1.43.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" 
}, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/80/9390645de4e76bf8195073f23029a9b54cd13b4294e3a5bcb56e4df1aafc/openai-1.43.0.tar.gz", hash = "sha256:e607aff9fc3e28eade107e5edd8ca95a910a4b12589336d3cbb6bfe2ac306b3c", size = 292477 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/4d/affea11bd85ca69d9fdd15567495bb9088ac1c37498c95cb42d9ecd984ed/openai-1.43.0-py3-none-any.whl", hash = "sha256:1a748c2728edd3a738a72a0212ba866f4fdbe39c9ae03813508b267d45104abe", size = 365744 }, +] + [[package]] name = "packaging" version = "24.1" @@ -677,6 +1570,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985 }, ] +[[package]] +name = "pandas" +version = "2.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/88/d9/ecf715f34c73ccb1d8ceb82fc01cd1028a65a5f6dbc57bfa6ea155119058/pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54", size = 4398391 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1b/70/61704497903d43043e288017cb2b82155c0d41e15f5c17807920877b45c2/pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288", size = 12574808 }, + { url = "https://files.pythonhosted.org/packages/16/c6/75231fd47afd6b3f89011e7077f1a3958441264aca7ae9ff596e3276a5d0/pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151", size = 11304876 }, + { url = "https://files.pythonhosted.org/packages/97/2d/7b54f80b93379ff94afb3bd9b0cd1d17b48183a0d6f98045bc01ce1e06a7/pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b", size = 15602548 }, + { url = "https://files.pythonhosted.org/packages/fc/a5/4d82be566f069d7a9a702dcdf6f9106df0e0b042e738043c0cc7ddd7e3f6/pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee", size = 13031332 }, + { url = "https://files.pythonhosted.org/packages/92/a2/b79c48f530673567805e607712b29814b47dcaf0d167e87145eb4b0118c6/pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db", size = 16286054 }, + { url = "https://files.pythonhosted.org/packages/40/c7/47e94907f1d8fdb4868d61bd6c93d57b3784a964d52691b77ebfdb062842/pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1", size = 13879507 }, + { url = "https://files.pythonhosted.org/packages/ab/63/966db1321a0ad55df1d1fe51505d2cdae191b84c907974873817b0a6e849/pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24", size = 11634249 }, + { url = "https://files.pythonhosted.org/packages/dd/49/de869130028fb8d90e25da3b7d8fb13e40f5afa4c4af1781583eb1ff3839/pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash 
= "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef", size = 12500886 }, + { url = "https://files.pythonhosted.org/packages/db/7c/9a60add21b96140e22465d9adf09832feade45235cd22f4cb1668a25e443/pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce", size = 11340320 }, + { url = "https://files.pythonhosted.org/packages/b0/85/f95b5f322e1ae13b7ed7e97bd999160fa003424711ab4dc8344b8772c270/pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad", size = 15204346 }, + { url = "https://files.pythonhosted.org/packages/40/10/79e52ef01dfeb1c1ca47a109a01a248754ebe990e159a844ece12914de83/pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad", size = 12733396 }, + { url = "https://files.pythonhosted.org/packages/35/9d/208febf8c4eb5c1d9ea3314d52d8bd415fd0ef0dd66bb24cc5bdbc8fa71a/pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76", size = 15858913 }, + { url = "https://files.pythonhosted.org/packages/99/d1/2d9bd05def7a9e08a92ec929b5a4c8d5556ec76fae22b0fa486cbf33ea63/pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32", size = 13417786 }, + { url = "https://files.pythonhosted.org/packages/22/a5/a0b255295406ed54269814bc93723cfd1a0da63fb9aaf99e1364f07923e5/pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23", size = 11498828 }, +] + [[package]] name = "parameterized" version = "0.9.0" @@ -716,6 +1637,47 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, ] +[[package]] +name = "pillow" +version = "10.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/74/ad3d526f3bf7b6d3f408b73fde271ec69dfac8b81341a318ce825f2b3812/pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06", size = 46555059 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/62/c9449f9c3043c37f73e7487ec4ef0c03eb9c9afc91a92b977a67b3c0bbc5/pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c", size = 3509265 }, + { url = "https://files.pythonhosted.org/packages/f4/5f/491dafc7bbf5a3cc1845dc0430872e8096eb9e2b6f8161509d124594ec2d/pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be", size = 3375655 }, + { url = "https://files.pythonhosted.org/packages/73/d5/c4011a76f4207a3c151134cd22a1415741e42fa5ddecec7c0182887deb3d/pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3", size = 4340304 }, + { url = "https://files.pythonhosted.org/packages/ac/10/c67e20445a707f7a610699bba4fe050583b688d8cd2d202572b257f46600/pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6", size = 4452804 }, + { url = "https://files.pythonhosted.org/packages/a9/83/6523837906d1da2b269dee787e31df3b0acb12e3d08f024965a3e7f64665/pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe", size = 4365126 }, + { url = "https://files.pythonhosted.org/packages/ba/e5/8c68ff608a4203085158cff5cc2a3c534ec384536d9438c405ed6370d080/pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319", size = 4533541 }, + { url = "https://files.pythonhosted.org/packages/f4/7c/01b8dbdca5bc6785573f4cee96e2358b0918b7b2c7b60d8b6f3abf87a070/pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d", size = 4471616 }, + { url = "https://files.pythonhosted.org/packages/c8/57/2899b82394a35a0fbfd352e290945440e3b3785655a03365c0ca8279f351/pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696", size = 4600802 }, + { url = "https://files.pythonhosted.org/packages/4d/d7/a44f193d4c26e58ee5d2d9db3d4854b2cfb5b5e08d360a5e03fe987c0086/pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496", size = 2235213 }, + { url = "https://files.pythonhosted.org/packages/c1/d0/5866318eec2b801cdb8c82abf190c8343d8a1cd8bf5a0c17444a6f268291/pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91", size = 2554498 }, + { url = "https://files.pythonhosted.org/packages/d4/c8/310ac16ac2b97e902d9eb438688de0d961660a87703ad1561fd3dfbd2aa0/pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22", size = 2243219 }, + { url = "https://files.pythonhosted.org/packages/05/cb/0353013dc30c02a8be34eb91d25e4e4cf594b59e5a55ea1128fde1e5f8ea/pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94", size = 3509350 }, + { url = "https://files.pythonhosted.org/packages/e7/cf/5c558a0f247e0bf9cec92bff9b46ae6474dd736f6d906315e60e4075f737/pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597", size = 3374980 }, + { url = "https://files.pythonhosted.org/packages/84/48/6e394b86369a4eb68b8a1382c78dc092245af517385c086c5094e3b34428/pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80", size = 4343799 }, + { url = "https://files.pythonhosted.org/packages/3b/f3/a8c6c11fa84b59b9df0cd5694492da8c039a24cd159f0f6918690105c3be/pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca", size = 4459973 }, + { url = "https://files.pythonhosted.org/packages/7d/1b/c14b4197b80150fb64453585247e6fb2e1d93761fa0fa9cf63b102fde822/pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef", size = 4370054 }, + { url = "https://files.pythonhosted.org/packages/55/77/40daddf677897a923d5d33329acd52a2144d54a9644f2a5422c028c6bf2d/pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a", size = 4539484 }, + { url = "https://files.pythonhosted.org/packages/40/54/90de3e4256b1207300fb2b1d7168dd912a2fb4b2401e439ba23c2b2cabde/pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b", size = 4477375 }, + { url = "https://files.pythonhosted.org/packages/13/24/1bfba52f44193860918ff7c93d03d95e3f8748ca1de3ceaf11157a14cf16/pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9", size = 4608773 }, + { url = "https://files.pythonhosted.org/packages/55/04/5e6de6e6120451ec0c24516c41dbaf80cce1b6451f96561235ef2429da2e/pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42", size = 2235690 }, + { url = "https://files.pythonhosted.org/packages/74/0a/d4ce3c44bca8635bd29a2eab5aa181b654a734a29b263ca8efe013beea98/pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a", size = 2554951 }, + { url = "https://files.pythonhosted.org/packages/b5/ca/184349ee40f2e92439be9b3502ae6cfc43ac4b50bc4fc6b3de7957563894/pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9", size = 2243427 }, + { url = "https://files.pythonhosted.org/packages/c3/00/706cebe7c2c12a6318aabe5d354836f54adff7156fd9e1bd6c89f4ba0e98/pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3", size = 3525685 }, + { url = "https://files.pythonhosted.org/packages/cf/76/f658cbfa49405e5ecbfb9ba42d07074ad9792031267e782d409fd8fe7c69/pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb", size = 3374883 }, + { url = "https://files.pythonhosted.org/packages/46/2b/99c28c4379a85e65378211971c0b430d9c7234b1ec4d59b2668f6299e011/pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70", size = 4339837 }, + { url = "https://files.pythonhosted.org/packages/f1/74/b1ec314f624c0c43711fdf0d8076f82d9d802afd58f1d62c2a86878e8615/pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be", size = 4455562 }, + { url = "https://files.pythonhosted.org/packages/4a/2a/4b04157cb7b9c74372fa867096a1607e6fedad93a44deeff553ccd307868/pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0", size = 4366761 }, + { url = "https://files.pythonhosted.org/packages/ac/7b/8f1d815c1a6a268fe90481232c98dd0e5fa8c75e341a75f060037bd5ceae/pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc", size = 4536767 }, + { url = "https://files.pythonhosted.org/packages/e5/77/05fa64d1f45d12c22c314e7b97398ffb28ef2813a485465017b7978b3ce7/pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a", size = 4477989 }, + { url = "https://files.pythonhosted.org/packages/12/63/b0397cfc2caae05c3fb2f4ed1b4fc4fc878f0243510a7a6034ca59726494/pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309", size = 4610255 }, + { url = "https://files.pythonhosted.org/packages/7b/f9/cfaa5082ca9bc4a6de66ffe1c12c2d90bf09c309a5f52b27759a596900e7/pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060", size = 2235603 }, + { url = "https://files.pythonhosted.org/packages/01/6a/30ff0eef6e0c0e71e55ded56a38d4859bf9d3634a94a88743897b5f96936/pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea", size = 2554972 }, + { url = "https://files.pythonhosted.org/packages/48/2c/2e0a52890f269435eee38b21c8218e102c621fe8d8df8b9dd06fabf879ba/pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d", size = 2243375 }, +] + [[package]] name = "platformdirs" version = "4.2.2" @@ -778,6 +1740,37 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e8/23/22750c4b768f09386d1c3cc4337953e8936f48a888fa6dddfb669b2c9088/prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", size = 386411 }, ] +[[package]] +name = "protobuf" +version = "5.28.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5f/d7/331ee1f3b798c34d2257c79d5426ecbe95d46d2b40ba808a29da6947f6d8/protobuf-5.28.0.tar.gz", hash = "sha256:dde74af0fa774fa98892209992295adbfb91da3fa98c8f67a88afe8f5a349add", size = 422388 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/34/fc43138c93316839080324cb066f35224b75dae56b9f0fdd9d47c988ee9a/protobuf-5.28.0-cp310-abi3-win32.whl", hash = "sha256:66c3edeedb774a3508ae70d87b3a19786445fe9a068dd3585e0cefa8a77b83d0", size = 419672 }, + { url = "https://files.pythonhosted.org/packages/de/f7/e7e03be7e7307123f6467080f283e484de7e892db54dd9a46f057d08c9ee/protobuf-5.28.0-cp310-abi3-win_amd64.whl", hash = "sha256:6d7cc9e60f976cf3e873acb9a40fed04afb5d224608ed5c1a105db4a3f09c5b6", size = 431486 }, + { url = "https://files.pythonhosted.org/packages/ce/ec/34f67d6a3398aa360524d90f75a8c648c99c807b2f1001f5ab16355c1d12/protobuf-5.28.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:532627e8fdd825cf8767a2d2b94d77e874d5ddb0adefb04b237f7cc296748681", size = 414744 }, + { url = "https://files.pythonhosted.org/packages/fe/79/636415c84eed9835fed83183db73fd6ea7ba76a85cae321ff2eaad722e85/protobuf-5.28.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:018db9056b9d75eb93d12a9d35120f97a84d9a919bcab11ed56ad2d399d6e8dd", size = 316527 }, + { url = "https://files.pythonhosted.org/packages/19/15/da43113361db20f2d521bc38d92549edbe06856aeec085c420b2b8af5751/protobuf-5.28.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:6206afcb2d90181ae8722798dcb56dc76675ab67458ac24c0dd7d75d632ac9bd", size = 316615 }, + { url = "https://files.pythonhosted.org/packages/e3/b2/4df9958122a0377e571972c71692420bafd623d1df3ce506d88c2aba7e12/protobuf-5.28.0-py3-none-any.whl", hash = "sha256:510ed78cd0980f6d3218099e874714cdf0d8a95582e7b059b06cabad855ed0a0", size = 169574 }, +] + +[[package]] +name = "psutil" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/c7/8c6872f7372eb6a6b2e4708b88419fb46b857f7a2e1892966b851cc79fc9/psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2", size = 508067 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c5/66/78c9c3020f573c58101dc43a44f6855d01bbbd747e24da2f0c4491200ea3/psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35", size = 249766 }, + { url = "https://files.pythonhosted.org/packages/e1/3f/2403aa9558bea4d3854b0e5e567bc3dd8e9fbc1fc4453c0aa9aafeb75467/psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1", size = 253024 }, + { url = "https://files.pythonhosted.org/packages/0b/37/f8da2fbd29690b3557cca414c1949f92162981920699cd62095a984983bf/psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0", size = 250961 }, + { url = "https://files.pythonhosted.org/packages/35/56/72f86175e81c656a01c4401cd3b1c923f891b31fbcebe98985894176d7c9/psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0", size = 287478 }, + { url = "https://files.pythonhosted.org/packages/19/74/f59e7e0d392bc1070e9a70e2f9190d652487ac115bb16e2eff6b22ad1d24/psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd", size = 290455 }, + { url = "https://files.pythonhosted.org/packages/cd/5f/60038e277ff0a9cc8f0c9ea3d0c5eb6ee1d2470ea3f9389d776432888e47/psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132", size = 292046 }, + { url = "https://files.pythonhosted.org/packages/8b/20/2ff69ad9c35c3df1858ac4e094f20bd2374d33c8643cf41da8fd7cdcb78b/psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d", size = 253560 }, + { url = "https://files.pythonhosted.org/packages/73/44/561092313ae925f3acfaace6f9ddc4f6a9c748704317bad9c8c8f8a36a79/psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3", size = 257399 }, + { url = "https://files.pythonhosted.org/packages/7c/06/63872a64c312a24fb9b4af123ee7007a306617da63ff13bcc1432386ead7/psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0", size = 251988 }, +] + [[package]] name = "ptyprocess" version = "0.7.0" @@ -857,6 +1850,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/13/63/b95781763e8d84207025071c0cec16d921c0163c7a9033ae4b9a0e020dc7/pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20", size = 1898013 }, ] +[[package]] +name = "pydot" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyparsing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/2f/482fcbc389e180e7f8d7e7cb06bc5a7c37be6c57939dfb950951d97f2722/pydot-2.0.0.tar.gz", hash = "sha256:60246af215123fa062f21cd791be67dda23a6f280df09f68919e637a1e4f3235", size = 152022 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/90/c9b51f3cdff89cd8f93382060330f43d1af098a6624cff439e700791e922/pydot-2.0.0-py3-none-any.whl", hash = "sha256:408a47913ea7bd5d2d34b274144880c1310c4aee901f353cf21fe2e526a4ea28", size = 22675 }, +] + [[package]] name = "pygments" version = "2.18.0" @@ -909,6 
+1914,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/11/80/34b429c6534be99ef3d6d20bd794b26fda0682d38e2d57f85df258beaac2/pylint_pydantic-0.3.2-py3-none-any.whl", hash = "sha256:e5cec02370aa68ac8eff138e5d573b0ac049bab864e9a6c3a9057cf043440aa1", size = 15951 }, ] +[[package]] +name = "pyparsing" +version = "3.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/08/13f3bce01b2061f2bbd582c9df82723de943784cf719a35ac886c652043a/pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032", size = 900231 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/0c/0e3c05b1c87bb6a1c76d281b0f35e78d2d80ac91b5f8f524cebf77f51049/pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c", size = 104100 }, +] + [[package]] name = "pyproject-hooks" version = "1.1.0" @@ -945,6 +1959,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/96/31/6607dab48616902f76885dfcf62c08d929796fc3b2d2318faf9fd54dbed9/pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b", size = 18024 }, ] +[[package]] +name = "pytest-rerunfailures" +version = "14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/a4/6de45fe850759e94aa9a55cda807c76245af1941047294df26c851dfb4a9/pytest-rerunfailures-14.0.tar.gz", hash = "sha256:4a400bcbcd3c7a4ad151ab8afac123d90eca3abe27f98725dc4d9702887d2e92", size = 21350 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/e7/e75bd157331aecc190f5f8950d7ea3d2cf56c3c57fb44da70e60b221133f/pytest_rerunfailures-14.0-py3-none-any.whl", hash = "sha256:4197bdd2eaeffdbf50b5ea6e7236f47ff0e44d1def8dae08e409f536d84e7b32", size = 12709 }, +] + [[package]] name = "pytest-subtests" version = "0.13.1" @@ -1026,6 +2053,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, ] +[[package]] +name = "python-dotenv" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 }, +] + +[[package]] +name = "pytz" +version = "2024.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/90/26/9f1f00a5d021fff16dee3de13d43e5e978f3d58928e129c3a62cf7eb9738/pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812", size = 316214 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/3d/a121f284241f08268b21359bd425f7d4825cffc5ac5cd0e1b3d82ffd2b10/pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319", size = 505474 
}, +] + [[package]] name = "pyyaml" version = "6.0.2" @@ -1159,6 +2204,83 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/86/d6/17caf2e4af1dec288477a0cbbe4a96fbc9b8a28457dce3f1f452630ce216/runs-1.2.2-py3-none-any.whl", hash = "sha256:0980dcbc25aba1505f307ac4f0e9e92cbd0be2a15a1e983ee86c24c87b839dfd", size = 7033 }, ] +[[package]] +name = "safetensors" +version = "0.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/5b/0e63bf736e171463481c5ea3406650dc25aa044083062d321820e7a1ef9f/safetensors-0.4.4.tar.gz", hash = "sha256:5fe3e9b705250d0172ed4e100a811543108653fb2b66b9e702a088ad03772a07", size = 69522 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/1b/27cea7a581019d0d674284048ff76e3a6e048bc3ae3c31cb0bfc93641180/safetensors-0.4.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bbaa31f2cb49013818bde319232ccd72da62ee40f7d2aa532083eda5664e85ff", size = 392373 }, + { url = "https://files.pythonhosted.org/packages/36/46/93c39c96188a88ca15d12759bb51f52ce7365f6fd19ef09580bc096e8860/safetensors-0.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9fdcb80f4e9fbb33b58e9bf95e7dbbedff505d1bcd1c05f7c7ce883632710006", size = 381488 }, + { url = "https://files.pythonhosted.org/packages/37/a2/93cab60b8e2c8ea6343a04cdd2c09c860c9640eaaffbf8b771a0e8f98e7d/safetensors-0.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55c14c20be247b8a1aeaf3ab4476265e3ca83096bb8e09bb1a7aa806088def4f", size = 441025 }, + { url = "https://files.pythonhosted.org/packages/19/37/2a5220dce5eff841328bfc3071f4a7063f3eb12341893b2688669fc67115/safetensors-0.4.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:949aaa1118660f992dbf0968487b3e3cfdad67f948658ab08c6b5762e90cc8b6", size = 439791 }, + { url = "https://files.pythonhosted.org/packages/f8/93/1d894ff44df26baf4c2471a5874388361390d3cb1cc4811cff40fc01373e/safetensors-0.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c11a4ab7debc456326a2bac67f35ee0ac792bcf812c7562a4a28559a5c795e27", size = 477752 }, + { url = "https://files.pythonhosted.org/packages/a5/17/b697f517c7ffb8d62d1ef17c6224c00edbb96b931e565d887476a51ac803/safetensors-0.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0cea44bba5c5601b297bc8307e4075535b95163402e4906b2e9b82788a2a6df", size = 496019 }, + { url = "https://files.pythonhosted.org/packages/af/b9/c33f69f4dad9c65209efb76c2be6968af5219e31ccfd344a0025d972252f/safetensors-0.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9d752c97f6bbe327352f76e5b86442d776abc789249fc5e72eacb49e6916482", size = 435416 }, + { url = "https://files.pythonhosted.org/packages/71/59/f6480a68df2f4fb5aefae45a800d9bc043c0549210075275fef190a896ce/safetensors-0.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03f2bb92e61b055ef6cc22883ad1ae898010a95730fa988c60a23800eb742c2c", size = 456771 }, + { url = "https://files.pythonhosted.org/packages/09/01/2a7507cdf7318fb68596e6537ef81e83cfc171c483b4a786b9c947368e19/safetensors-0.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf3f91a9328a941acc44eceffd4e1f5f89b030985b2966637e582157173b98", size = 619456 }, + { url = "https://files.pythonhosted.org/packages/80/b3/4bb5b1fb025cb8c81fe8a76371334860a9c276fade616f83fd53feef2740/safetensors-0.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:20d218ec2b6899d29d6895419a58b6e44cc5ff8f0cc29fac8d236a8978ab702e", size = 
605125 }, + { url = "https://files.pythonhosted.org/packages/09/93/0d6d54b84eff8361dc257fa306ae0ef1899025a2d9657efe8384ac8b7267/safetensors-0.4.4-cp311-none-win32.whl", hash = "sha256:8079486118919f600c603536e2490ca37b3dbd3280e3ad6eaacfe6264605ac8a", size = 272273 }, + { url = "https://files.pythonhosted.org/packages/21/4f/5ee44681c7ea827f9d3c104ca429865b41c05a4163eff7f0599152c2e682/safetensors-0.4.4-cp311-none-win_amd64.whl", hash = "sha256:2f8c2eb0615e2e64ee27d478c7c13f51e5329d7972d9e15528d3e4cfc4a08f0d", size = 285982 }, + { url = "https://files.pythonhosted.org/packages/e2/41/a491dbe3fc1c195ce648939a87d3b4b3800eaade2f05278a6dc02b575c51/safetensors-0.4.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:baec5675944b4a47749c93c01c73d826ef7d42d36ba8d0dba36336fa80c76426", size = 391372 }, + { url = "https://files.pythonhosted.org/packages/3a/a1/d99aa8d10fa8d82276ee2aaa87afd0a6b96e69c128eaa9f93524b52c5276/safetensors-0.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f15117b96866401825f3e94543145028a2947d19974429246ce59403f49e77c6", size = 381800 }, + { url = "https://files.pythonhosted.org/packages/c8/1c/4fa05b79afdd4688a357a42433565b5b09137af6b4f6cd0c9e371466e2f1/safetensors-0.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a13a9caea485df164c51be4eb0c87f97f790b7c3213d635eba2314d959fe929", size = 440817 }, + { url = "https://files.pythonhosted.org/packages/65/c0/152b059debd3cee4f44b7df972e915a38f776379ea99ce4a3cbea3f78dbd/safetensors-0.4.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b54bc4ca5f9b9bba8cd4fb91c24b2446a86b5ae7f8975cf3b7a277353c3127c", size = 439483 }, + { url = "https://files.pythonhosted.org/packages/9c/93/20c05daeecf6fa93b9403c3660df1d983d7ddd5cdb3e3710ff41b72754dd/safetensors-0.4.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08332c22e03b651c8eb7bf5fc2de90044f3672f43403b3d9ac7e7e0f4f76495e", size = 476631 }, + { url = "https://files.pythonhosted.org/packages/84/2f/bfe3e54b7dbcaef3f10b8f3c71146790ab18b0bd79ad9ca2bc2c950b68df/safetensors-0.4.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb62841e839ee992c37bb75e75891c7f4904e772db3691c59daaca5b4ab960e1", size = 493575 }, + { url = "https://files.pythonhosted.org/packages/1b/0b/2a1b405131f26b95acdb3ed6c8e3a8c84de72d364fd26202d43e68ec4bad/safetensors-0.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5b927acc5f2f59547270b0309a46d983edc44be64e1ca27a7fcb0474d6cd67", size = 434891 }, + { url = "https://files.pythonhosted.org/packages/31/ce/cad390a08128ebcb74be79a1e03c496a4773059b2541c6a97a52fd1705fb/safetensors-0.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a69c71b1ae98a8021a09a0b43363b0143b0ce74e7c0e83cacba691b62655fb8", size = 457631 }, + { url = "https://files.pythonhosted.org/packages/9f/83/d9d6e6a45d624c27155f4336af8e7b2bcde346137f6460dcd5e1bcdc2e3f/safetensors-0.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23654ad162c02a5636f0cd520a0310902c4421aab1d91a0b667722a4937cc445", size = 619367 }, + { url = "https://files.pythonhosted.org/packages/9f/20/b37e1ae87cb83a1c2fe5cf0710bab12d6f186474cbbdda4fda2d7d57d225/safetensors-0.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0677c109d949cf53756859160b955b2e75b0eefe952189c184d7be30ecf7e858", size = 605302 }, + { url = "https://files.pythonhosted.org/packages/99/5a/9237f1d0adba5eec3711d7c1911b3111631a86779d692fe8ad2cd709d6a4/safetensors-0.4.4-cp312-none-win32.whl", 
hash = "sha256:a51d0ddd4deb8871c6de15a772ef40b3dbd26a3c0451bb9e66bc76fc5a784e5b", size = 273434 }, + { url = "https://files.pythonhosted.org/packages/b9/dd/b11f3a33fe7b6c94fde08b3de094b93d3438d67922ef90bcb5002e306e0b/safetensors-0.4.4-cp312-none-win_amd64.whl", hash = "sha256:2d065059e75a798bc1933c293b68d04d79b586bb7f8c921e0ca1e82759d0dbb1", size = 286347 }, + { url = "https://files.pythonhosted.org/packages/b3/d6/7a4db869a295b57066e1399eb467c38df86439d3766c850ca8eb75b5e3a3/safetensors-0.4.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:9d625692578dd40a112df30c02a1adf068027566abd8e6a74893bb13d441c150", size = 391373 }, + { url = "https://files.pythonhosted.org/packages/1e/97/de856ad42ef65822ff982e7af7fc889cd717240672b45c647af7ea05c631/safetensors-0.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7cabcf39c81e5b988d0adefdaea2eb9b4fd9bd62d5ed6559988c62f36bfa9a89", size = 382523 }, + { url = "https://files.pythonhosted.org/packages/07/d2/d9316af4c15b4ca0362cb4498abe47be6e04f7119f3ccf697e38ee04d33b/safetensors-0.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8359bef65f49d51476e9811d59c015f0ddae618ee0e44144f5595278c9f8268c", size = 441039 }, + { url = "https://files.pythonhosted.org/packages/e8/ac/478e910c891feadb693316b31447f14929b7047a612df9b628589b89be3c/safetensors-0.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1a32c662e7df9226fd850f054a3ead0e4213a96a70b5ce37b2d26ba27004e013", size = 439516 }, + { url = "https://files.pythonhosted.org/packages/81/43/f9929e854c4fcca98459f03de003d9619dd5f7d10d74e03df7af9907b119/safetensors-0.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c329a4dcc395364a1c0d2d1574d725fe81a840783dda64c31c5a60fc7d41472c", size = 477242 }, + { url = "https://files.pythonhosted.org/packages/0a/4d/b754f59fe395ea5bd8531c090c557e161fffed1753eeb3d87c0f8eaa62c4/safetensors-0.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:239ee093b1db877c9f8fe2d71331a97f3b9c7c0d3ab9f09c4851004a11f44b65", size = 494615 }, + { url = "https://files.pythonhosted.org/packages/54/7d/b26801dab2ecb499eb1ebdb46be65600b49bb062fe12b298150695a6e23c/safetensors-0.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd574145d930cf9405a64f9923600879a5ce51d9f315443a5f706374841327b6", size = 434933 }, + { url = "https://files.pythonhosted.org/packages/e2/40/0f6627ad98e21e620a6835f02729f6b701804d3c452f8773648cbd0b9c2c/safetensors-0.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f6784eed29f9e036acb0b7769d9e78a0dc2c72c2d8ba7903005350d817e287a4", size = 457646 }, + { url = "https://files.pythonhosted.org/packages/30/1e/7f7819d1be7c36fbedcb7099a461b79e0ed19631b3ca5595e0f81501bb2c/safetensors-0.4.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:65a4a6072436bf0a4825b1c295d248cc17e5f4651e60ee62427a5bcaa8622a7a", size = 619204 }, + { url = "https://files.pythonhosted.org/packages/b1/58/e91e8c9888303919ce56f038fcad4147431fd95630890799bf8c928d1d34/safetensors-0.4.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:df81e3407630de060ae8313da49509c3caa33b1a9415562284eaf3d0c7705f9f", size = 605400 }, +] + +[[package]] +name = "scipy" +version = "1.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/11/4d44a1f274e002784e4dbdb81e0ea96d2de2d1045b2132d5af62cc31fd28/scipy-1.14.1.tar.gz", hash = 
"sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417", size = 58620554 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/ab/070ccfabe870d9f105b04aee1e2860520460ef7ca0213172abfe871463b9/scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675", size = 39076999 }, + { url = "https://files.pythonhosted.org/packages/a7/c5/02ac82f9bb8f70818099df7e86c3ad28dae64e1347b421d8e3adf26acab6/scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2", size = 29894570 }, + { url = "https://files.pythonhosted.org/packages/ed/05/7f03e680cc5249c4f96c9e4e845acde08eb1aee5bc216eff8a089baa4ddb/scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617", size = 23103567 }, + { url = "https://files.pythonhosted.org/packages/5e/fc/9f1413bef53171f379d786aabc104d4abeea48ee84c553a3e3d8c9f96a9c/scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8", size = 25499102 }, + { url = "https://files.pythonhosted.org/packages/c2/4b/b44bee3c2ddc316b0159b3d87a3d467ef8d7edfd525e6f7364a62cd87d90/scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37", size = 35586346 }, + { url = "https://files.pythonhosted.org/packages/93/6b/701776d4bd6bdd9b629c387b5140f006185bd8ddea16788a44434376b98f/scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2", size = 41165244 }, + { url = "https://files.pythonhosted.org/packages/06/57/e6aa6f55729a8f245d8a6984f2855696c5992113a5dc789065020f8be753/scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2", size = 42817917 }, + { url = "https://files.pythonhosted.org/packages/ea/c2/5ecadc5fcccefaece775feadcd795060adf5c3b29a883bff0e678cfe89af/scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94", size = 44781033 }, + { url = "https://files.pythonhosted.org/packages/c0/04/2bdacc8ac6387b15db6faa40295f8bd25eccf33f1f13e68a72dc3c60a99e/scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d", size = 39128781 }, + { url = "https://files.pythonhosted.org/packages/c8/53/35b4d41f5fd42f5781dbd0dd6c05d35ba8aa75c84ecddc7d44756cd8da2e/scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07", size = 29939542 }, + { url = "https://files.pythonhosted.org/packages/66/67/6ef192e0e4d77b20cc33a01e743b00bc9e68fb83b88e06e636d2619a8767/scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5", size = 23148375 }, + { url = "https://files.pythonhosted.org/packages/f6/32/3a6dedd51d68eb7b8e7dc7947d5d841bcb699f1bf4463639554986f4d782/scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc", size = 25578573 }, + { url = 
"https://files.pythonhosted.org/packages/f0/5a/efa92a58dc3a2898705f1dc9dbaf390ca7d4fba26d6ab8cfffb0c72f656f/scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310", size = 35319299 }, + { url = "https://files.pythonhosted.org/packages/8e/ee/8a26858ca517e9c64f84b4c7734b89bda8e63bec85c3d2f432d225bb1886/scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066", size = 40849331 }, + { url = "https://files.pythonhosted.org/packages/a5/cd/06f72bc9187840f1c99e1a8750aad4216fc7dfdd7df46e6280add14b4822/scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1", size = 42544049 }, + { url = "https://files.pythonhosted.org/packages/aa/7d/43ab67228ef98c6b5dd42ab386eae2d7877036970a0d7e3dd3eb47a0d530/scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f", size = 44521212 }, + { url = "https://files.pythonhosted.org/packages/50/ef/ac98346db016ff18a6ad7626a35808f37074d25796fd0234c2bb0ed1e054/scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79", size = 39091068 }, + { url = "https://files.pythonhosted.org/packages/b9/cc/70948fe9f393b911b4251e96b55bbdeaa8cca41f37c26fd1df0232933b9e/scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e", size = 29875417 }, + { url = "https://files.pythonhosted.org/packages/3b/2e/35f549b7d231c1c9f9639f9ef49b815d816bf54dd050da5da1c11517a218/scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73", size = 23084508 }, + { url = "https://files.pythonhosted.org/packages/3f/d6/b028e3f3e59fae61fb8c0f450db732c43dd1d836223a589a8be9f6377203/scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e", size = 25503364 }, + { url = "https://files.pythonhosted.org/packages/a7/2f/6c142b352ac15967744d62b165537a965e95d557085db4beab2a11f7943b/scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d", size = 35292639 }, + { url = "https://files.pythonhosted.org/packages/56/46/2449e6e51e0d7c3575f289f6acb7f828938eaab8874dbccfeb0cd2b71a27/scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e", size = 40798288 }, + { url = "https://files.pythonhosted.org/packages/32/cd/9d86f7ed7f4497c9fd3e39f8918dd93d9f647ba80d7e34e4946c0c2d1a7c/scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06", size = 42524647 }, + { url = "https://files.pythonhosted.org/packages/f5/1b/6ee032251bf4cdb0cc50059374e86a9f076308c1512b61c4e003e241efb7/scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84", size = 44469524 }, +] + [[package]] name = "sentry-sdk" version = "2.13.0" @@ -1172,6 +2294,47 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ad/7e/e9ca09f24a6c334286631a2d32c267cdc5edad5ac03fd9d20a01a82f1c35/sentry_sdk-2.13.0-py2.py3-none-any.whl", hash 
= "sha256:6beede8fc2ab4043da7f69d95534e320944690680dd9a963178a49de71d726c6", size = 309078 }, ] +[[package]] +name = "setproctitle" +version = "1.3.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ff/e1/b16b16a1aa12174349d15b73fd4b87e641a8ae3fb1163e80938dbbf6ae98/setproctitle-1.3.3.tar.gz", hash = "sha256:c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae", size = 27253 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/17/7f9d5ddf4cfc4386e74565ccf63b8381396336e4629bb165b52b803ceddb/setproctitle-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:334f7ed39895d692f753a443102dd5fed180c571eb6a48b2a5b7f5b3564908c8", size = 16948 }, + { url = "https://files.pythonhosted.org/packages/ff/5d/77edf4c29c8d6728b49d3f0abb22159bb9c0c4ddebd721c09486b34985c8/setproctitle-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:950f6476d56ff7817a8fed4ab207727fc5260af83481b2a4b125f32844df513a", size = 11305 }, + { url = "https://files.pythonhosted.org/packages/13/f0/263954ca925a278036f100405e7ba82d4341e1e6bdc09f35362a7b40f684/setproctitle-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:195c961f54a09eb2acabbfc90c413955cf16c6e2f8caa2adbf2237d1019c7dd8", size = 31578 }, + { url = "https://files.pythonhosted.org/packages/79/52/503b546da451deb78fde27fec96c39d3f63a7958be60c9a837de89f47a0d/setproctitle-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f05e66746bf9fe6a3397ec246fe481096664a9c97eb3fea6004735a4daf867fd", size = 32910 }, + { url = "https://files.pythonhosted.org/packages/48/72/aeb734419a58a85ca7845c3d0011c322597da4ff601ebbc28f6c1dfd1ae8/setproctitle-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5901a31012a40ec913265b64e48c2a4059278d9f4e6be628441482dd13fb8b5", size = 30086 }, + { url = "https://files.pythonhosted.org/packages/fd/df/44b267cb8f073a4ae77e120f0705ab3a07165ad90cecd4881b34c7e1e37b/setproctitle-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64286f8a995f2cd934082b398fc63fca7d5ffe31f0e27e75b3ca6b4efda4e353", size = 31076 }, + { url = "https://files.pythonhosted.org/packages/82/c2/79ad43c914418cb1920e0198ac7326061c05cd4ec75c86ed0ca456b7e957/setproctitle-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:184239903bbc6b813b1a8fc86394dc6ca7d20e2ebe6f69f716bec301e4b0199d", size = 41226 }, + { url = "https://files.pythonhosted.org/packages/81/1b/0498c36a07a73d39a7070f45d96a299006e624efc07fc2e2296286237316/setproctitle-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:664698ae0013f986118064b6676d7dcd28fefd0d7d5a5ae9497cbc10cba48fa5", size = 39723 }, + { url = "https://files.pythonhosted.org/packages/3a/fe/ebbcffd6012b9cf5edb017a9c30cfc2beccf707f5bf495da8cf69b4abe69/setproctitle-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e5119a211c2e98ff18b9908ba62a3bd0e3fabb02a29277a7232a6fb4b2560aa0", size = 42773 }, + { url = "https://files.pythonhosted.org/packages/64/b1/5786c0442435eb18d04299c8ce7d1f86feb5154444ac684963527a76e169/setproctitle-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:417de6b2e214e837827067048f61841f5d7fc27926f2e43954567094051aff18", size = 41089 }, + { url = "https://files.pythonhosted.org/packages/33/fb/14b41e920406a12de0a164ef3b86d62edb4fac63d91d9f86f3b80dae5b38/setproctitle-1.3.3-cp311-cp311-win32.whl", hash = 
"sha256:6a143b31d758296dc2f440175f6c8e0b5301ced3b0f477b84ca43cdcf7f2f476", size = 11066 }, + { url = "https://files.pythonhosted.org/packages/7e/ba/f6da9ba74e8c2c662e932b27a01025c1bee2846222f6a2e87a69c259772f/setproctitle-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a680d62c399fa4b44899094027ec9a1bdaf6f31c650e44183b50d4c4d0ccc085", size = 11817 }, + { url = "https://files.pythonhosted.org/packages/32/22/9672612b194e4ac5d9fb67922ad9d30232b4b66129b0381ab5efeb6ae88f/setproctitle-1.3.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d4460795a8a7a391e3567b902ec5bdf6c60a47d791c3b1d27080fc203d11c9dc", size = 16917 }, + { url = "https://files.pythonhosted.org/packages/49/e5/562ff00f2f3f4253ff8fa6886e0432b8eae8cde82530ac19843d8ed2c485/setproctitle-1.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bdfd7254745bb737ca1384dee57e6523651892f0ea2a7344490e9caefcc35e64", size = 11264 }, + { url = "https://files.pythonhosted.org/packages/8f/1f/f97ea7bf71c873590a63d62ba20bf7294439d1c28603e5c63e3616c2131a/setproctitle-1.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:477d3da48e216d7fc04bddab67b0dcde633e19f484a146fd2a34bb0e9dbb4a1e", size = 31907 }, + { url = "https://files.pythonhosted.org/packages/66/fb/2d90806b9a2ed97c140baade3d1d2d41d3b51458300a2d999268be24d21d/setproctitle-1.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab2900d111e93aff5df9fddc64cf51ca4ef2c9f98702ce26524f1acc5a786ae7", size = 33333 }, + { url = "https://files.pythonhosted.org/packages/38/39/e7ce791f5635f3a16bd21d6b79bd9280c4c4aed8ab936b4b21334acf05a7/setproctitle-1.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088b9efc62d5aa5d6edf6cba1cf0c81f4488b5ce1c0342a8b67ae39d64001120", size = 30573 }, + { url = "https://files.pythonhosted.org/packages/20/22/fd76bbde4194d4e31d5b31a02f80c8e7e54a99d3d8ff34f3d656c6655689/setproctitle-1.3.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6d50252377db62d6a0bb82cc898089916457f2db2041e1d03ce7fadd4a07381", size = 31601 }, + { url = "https://files.pythonhosted.org/packages/51/5c/a6257cc68e17abcc4d4a78cc6666aa0d3805af6d942576625c4a468a72f0/setproctitle-1.3.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:87e668f9561fd3a457ba189edfc9e37709261287b52293c115ae3487a24b92f6", size = 40717 }, + { url = "https://files.pythonhosted.org/packages/db/31/4f0faad7ef641be4e8dfcbc40829775f2d6a4ca1ff435a4074047fa3dad1/setproctitle-1.3.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:287490eb90e7a0ddd22e74c89a92cc922389daa95babc833c08cf80c84c4df0a", size = 39384 }, + { url = "https://files.pythonhosted.org/packages/22/17/8763dc4f9ddf36af5f043ceec213b0f9f45f09fd2d5061a89c699aabe8b0/setproctitle-1.3.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:4fe1c49486109f72d502f8be569972e27f385fe632bd8895f4730df3c87d5ac8", size = 42350 }, + { url = "https://files.pythonhosted.org/packages/7b/b2/2403cecf2e5c5b4da22f7d9df4b2149bf92d03a3422185e682e81055549c/setproctitle-1.3.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4a6ba2494a6449b1f477bd3e67935c2b7b0274f2f6dcd0f7c6aceae10c6c6ba3", size = 40704 }, + { url = "https://files.pythonhosted.org/packages/5e/c1/11e80061ac06aece2a0ffcaf018cdc088aebb2fc586f68201755518532ad/setproctitle-1.3.3-cp312-cp312-win32.whl", hash = "sha256:2df2b67e4b1d7498632e18c56722851ba4db5d6a0c91aaf0fd395111e51cdcf4", size = 11057 }, + { url = 
"https://files.pythonhosted.org/packages/90/e8/ece468e93e99d3b2826e9649f6d03e80f071d451e20c742f201f77d1bea1/setproctitle-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:f38d48abc121263f3b62943f84cbaede05749047e428409c2c199664feb6abc7", size = 11809 }, +] + +[[package]] +name = "setuptools" +version = "74.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/27/cb/e754933c1ca726b0d99980612dc9da2886e76c83968c246cfb50f491a96b/setuptools-74.1.1.tar.gz", hash = "sha256:2353af060c06388be1cecbf5953dcdb1f38362f87a2356c480b6b4d5fcfc8847", size = 1357738 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/f3/e30ee63caefa90716afdffd7d9ae959cd8d0dbd2d0a0eb9fe1d73ddf806b/setuptools-74.1.1-py3-none-any.whl", hash = "sha256:fc91b5f89e392ef5b77fe143b17e32f65d3024744fba66dc3afe07201684d766", size = 1263655 }, +] + [[package]] name = "six" version = "1.16.0" @@ -1190,33 +2353,42 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a7/a5/10f97f73544edcdef54409f1d839f6049a0d79df68adbc1ceb24d1aaca42/smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da", size = 24282 }, ] +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, +] + [[package]] name = "sqlalchemy" -version = "2.0.32" +version = "2.0.33" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "greenlet", marker = "(python_full_version < '3.13' and platform_machine == 'AMD64') or (python_full_version < '3.13' and platform_machine == 'WIN32') or (python_full_version < '3.13' and platform_machine == 'aarch64') or (python_full_version < '3.13' and platform_machine == 'amd64') or (python_full_version < '3.13' and platform_machine == 'ppc64le') or (python_full_version < '3.13' and platform_machine == 'win32') or (python_full_version < '3.13' and platform_machine == 'x86_64')" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/af/6f/967e987683908af816aa3072c1a6997ac9933cf38d66b0474fb03f253323/SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8", size = 9546691 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/a9/e3bd92004095ed6796ea4ac5fdd9606b1e53117ef5b90ae79ac3fc6e225e/SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f", size = 2088752 }, - { url = "https://files.pythonhosted.org/packages/a9/34/b97f4458eefbdead7ee5ce69cbf3591574c5ba44162dbe52c4386818623f/SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5", size = 2079150 }, - { url = "https://files.pythonhosted.org/packages/6b/b5/95ff12f5d4eb7813dd5a59ccc8e3c68d4683fedf59801b40704593c3b757/SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d", size = 3197551 }, - { url = "https://files.pythonhosted.org/packages/ca/af/379f8695ab751acf61868b0098c8d66e2b2ad8b11d9939d5144c82d05bc5/SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0", size = 3197551 }, - { url = "https://files.pythonhosted.org/packages/ff/0c/5feaea51f23b5f008f16f9dbf7eec18ee5b9b8eb2875d6e367f52daf633e/SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2", size = 3134583 }, - { url = "https://files.pythonhosted.org/packages/cc/83/4eca3604f9049a2b92a9ffb818ea1cc8186f722e539a6feee58f931bad34/SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961", size = 3154911 }, - { url = "https://files.pythonhosted.org/packages/3d/56/485ad322f148a8b70060e03b5f130e714f95d839b5e50315e5c5efd1fc05/SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28", size = 2059047 }, - { url = "https://files.pythonhosted.org/packages/bb/8c/4548ae42b4ab7f3fe9f1aeb4b1f28ea795485ca44840cb0f3f57aa8ecfcc/SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924", size = 2084480 }, - { url = "https://files.pythonhosted.org/packages/06/95/88beb07aa61c611829c9ce950f349adcf00065c1bb313090c20d80a520ca/SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92", size = 2087267 }, - { url = "https://files.pythonhosted.org/packages/11/93/0b28f9d261af927eef3df472e5bbf144fb33e062de770b2c312bb516702b/SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9", size = 2077732 }, - { url = "https://files.pythonhosted.org/packages/84/50/1ce1dec4b1cce8f1163c2c58bb1588ac5076c3dbc4bb1d3eab70e798fdd4/SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8", size = 3227230 }, - { url = "https://files.pythonhosted.org/packages/9d/b8/aa822988d390cf06afa3c69d86a3a38bba79b51385207cd7cd99d0be17bb/SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec", size = 3238118 }, - { url = "https://files.pythonhosted.org/packages/c3/d7/7a65172ed2713acf0262a65392dfcf05ca2b7a67c988ebad425eba9b3843/SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c", size = 3173610 }, - { url = "https://files.pythonhosted.org/packages/a9/0f/8da0613e3f0b095ef423802943ed4b98242370736034ed5043a43c46c3d4/SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb", size = 3200224 }, - { url = "https://files.pythonhosted.org/packages/50/ef/973e0bbf2be5c12e34dca92139ca100f51ba078e36c3c06fd1dc8480c209/SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d", size = 2057626 }, - { url = 
"https://files.pythonhosted.org/packages/db/5f/440c324aae82a2ce892ac0fe1d114b9dc9f04e934e8f0762574876a168b5/SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb", size = 2083167 }, - { url = "https://files.pythonhosted.org/packages/99/1b/045185a9f6481d926a451aafaa0d07c98f19ac7abe730dff9630c9ead4fa/SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202", size = 1878765 }, +sdist = { url = "https://files.pythonhosted.org/packages/d5/70/6dc437aff20e454e8ac35cdcc74620fad55985b5ea2830fa2d73b02d5805/sqlalchemy-2.0.33.tar.gz", hash = "sha256:91c93333c2b37ff721dc83b37e28c29de4c502b5612f2d093468037b86aa2be0", size = 9555649 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/aa/c78261913d808f73155e65bd10dfa412324958febe64a4a72c8970227419/SQLAlchemy-2.0.33-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:570ec43e8c3c020abac4f0720baa5fe5187334e3f1e8e1777183c041962b61cc", size = 2090666 }, + { url = "https://files.pythonhosted.org/packages/ca/1a/71e0630d3c1fdb749a0121dc7952ede5bd1abc398bc5a7cdba4772346fe1/SQLAlchemy-2.0.33-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81759e77a4985abdbac068762a0eaf0f11860fe041ad6da170aae7615ea72531", size = 2081068 }, + { url = "https://files.pythonhosted.org/packages/25/b1/6356c2ce8b7d29af484723c87a2f5cdf90edcd62b022ac4a9d7ed5af0ad1/SQLAlchemy-2.0.33-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49541a43828e273325c520fbacf786615bd974dad63ff60b8ea1e1216e914d1a", size = 3199480 }, + { url = "https://files.pythonhosted.org/packages/dd/ec/00123f88ab35a65bc2bc81ac98da2ed5ee5013add5585387c608c85df687/SQLAlchemy-2.0.33-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82c72da5be489c8d150deba70d5732398695418df5232bceb52ee323ddd9753b", size = 3199478 }, + { url = "https://files.pythonhosted.org/packages/6c/31/5de26dc3d5fc5903f100dc02d3140126f5e8df943c8444841c09ea70fe3c/SQLAlchemy-2.0.33-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:31e56020832be602201fbf8189f379569cf5c3604cdc4ce79f10dbbfcbf8a0eb", size = 3136505 }, + { url = "https://files.pythonhosted.org/packages/74/ab/e9f094580af17ee75fd437a9bd0d900286596868d0d6f63de8046203118b/SQLAlchemy-2.0.33-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:30a3f55be76364b64c83788728faaba782ab282a24909e1994404c2146d39982", size = 3156832 }, + { url = "https://files.pythonhosted.org/packages/79/42/e56bfbf31941e12c85ae52d69a1f01e6630362aec7c72dd5cd0a85321b3e/SQLAlchemy-2.0.33-cp311-cp311-win32.whl", hash = "sha256:17d0c69f66392ad2db1609373a74d1f834b2e632f3f52d446747b8ec220aea53", size = 2061357 }, + { url = "https://files.pythonhosted.org/packages/f2/1c/eac9edd2762606a1e11837b87c31429d834b995f00255ba438425b6a3852/SQLAlchemy-2.0.33-cp311-cp311-win_amd64.whl", hash = "sha256:c5d5a733c6af7f392435e673d1b136f6bdf2366033abb35eed680400dc730840", size = 2086706 }, + { url = "https://files.pythonhosted.org/packages/57/96/d73fd23190509aec9a8d62d0217ce526607cbd99bc0199f659c024ec9d4f/SQLAlchemy-2.0.33-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1d81e3aeab456fe24c3f0dcfd4f952a3a5ee45e9c14fc66d34c1d7a60cf7b698", size = 2089180 }, + { url = "https://files.pythonhosted.org/packages/17/b0/cec5381e016f50ad759f8b96883f66d24d1be504db34aeea4addfcc10e6f/SQLAlchemy-2.0.33-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca8788dc1baee100f09110f33a01d928cf9df4483d2bfb25a37be31a659d46bb", size = 2079655 }, + { url = 
"https://files.pythonhosted.org/packages/01/e3/602668ecedf4aea749d6c89b6d53bd741c79c339dede417e9e7b12c67716/SQLAlchemy-2.0.33-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60c54b677d4f0a0b2df3b79e89e84d601fb931c720176641742efd66b50601f9", size = 3229154 }, + { url = "https://files.pythonhosted.org/packages/3d/5f/a67867ad9fc553249c437bd84b311d55d758471b6ae184b7d2482f1588ee/SQLAlchemy-2.0.33-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684aee5fd811091b2f48006fb3fe6c7f2de4a716ef8d294a2aab762099753133", size = 3240045 }, + { url = "https://files.pythonhosted.org/packages/aa/27/2e05d8ca33e7d714e9b5804681395a57576a7716bf6f5460779d24fb69af/SQLAlchemy-2.0.33-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee2b82b170591ccd19d463c9798a9caeea0cad967a8d2f3264de459f582696d5", size = 3175530 }, + { url = "https://files.pythonhosted.org/packages/cc/ca/5cc2ca72b185f63ef1d29f3262a36332d51d54c74fb9136bf385d29d890f/SQLAlchemy-2.0.33-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1109cc6dc5c9d1223c42186391e6a5509e6d4ab2c30fa629573c10184f742f2e", size = 3202147 }, + { url = "https://files.pythonhosted.org/packages/44/fb/98fee23d5ffed1af441bdf7b58e57f20a9e2b40bf3b7600bdd3a8870aae4/SQLAlchemy-2.0.33-cp312-cp312-win32.whl", hash = "sha256:c633e2d2f8a7b88c06e276bbe16cb7e62fed815fcbeb69cd9752cea166ecb8e8", size = 2059540 }, + { url = "https://files.pythonhosted.org/packages/ca/a4/4956be070ee79f30dbffcd0cc83693c2fb67d8b299585401044d17261e81/SQLAlchemy-2.0.33-cp312-cp312-win_amd64.whl", hash = "sha256:77eaf8fdf305266b806a91ae4633edbf86ad37e13bd92ac85e305e7f654c19a5", size = 2085269 }, + { url = "https://files.pythonhosted.org/packages/54/ba/71b82a3e4172773fc417b81ea9e798bd85d643a0ddc95b6cfb586ce18c84/SQLAlchemy-2.0.33-py3-none-any.whl", hash = "sha256:ae294808afde1b14a1a69aa86a69cadfe391848bbb233a5332a8065e4081cabc", size = 1880669 }, ] [package.optional-dependencies] @@ -1240,6 +2412,39 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, ] +[[package]] +name = "starlette" +version = "0.38.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b4/9c/d85721508122ae927aa7064e10a3f47f8dabcd4fd162222afcecd0d8d7b7/starlette-0.38.4.tar.gz", hash = "sha256:53a7439060304a208fea17ed407e998f46da5e5d9b1addfea3040094512a6379", size = 2571980 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/13/fa916b69d7c21f80a9c5bde0445cbbbdb9542a9d8df73ea3d588aae55c26/starlette-0.38.4-py3-none-any.whl", hash = "sha256:526f53a77f0e43b85f583438aee1a940fd84f8fd610353e8b0c1a77ad8a87e76", size = 71427 }, +] + +[[package]] +name = "sympy" +version = "1.13.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/15/4a041424c7187f41cce678f5a02189b244e9aac61a18b45cd415a3a470f3/sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13", size = 7532926 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/f9/6845bf8fca0eaf847da21c5d5bc6cd92797364662824a11d3f836423a1a5/sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9", size = 6189289 }, 
+] + +[[package]] +name = "tenacity" +version = "9.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/94/91fccdb4b8110642462e653d5dcb27e7b674742ad68efd146367da7bdb10/tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b", size = 47421 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/cb/b86984bed139586d01532a587464b5805f12e397594f19f931c4c2fbfa61/tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539", size = 28169 }, +] + [[package]] name = "termcolor" version = "2.4.0" @@ -1281,6 +2486,41 @@ version = "0.5.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/80/f8/0802dd14c58b5d3d72bb9caa4315535f58787a1dc50b81bbbcaaa15451be/timeout-decorator-0.5.0.tar.gz", hash = "sha256:6a2f2f58db1c5b24a2cc79de6345760377ad8bdc13813f5265f6c3e63d16b3d7", size = 4754 } +[[package]] +name = "tokenizers" +version = "0.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "huggingface-hub" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/04/2071c150f374aab6d5e92aaec38d0f3c368d227dd9e0469a1f0966ac68d1/tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3", size = 321039 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/d6/6e1d728d765eb4102767f071bf7f6439ab10d7f4a975c9217db65715207a/tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059", size = 2533448 }, + { url = "https://files.pythonhosted.org/packages/90/79/d17a0f491d10817cd30f1121a07aa09c8e97a81114b116e473baf1577f09/tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14", size = 2440254 }, + { url = "https://files.pythonhosted.org/packages/c7/28/2d11c3ff94f9d42eceb2ea549a06e3f166fe391c5a025e5d96fac898a3ac/tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594", size = 3684971 }, + { url = "https://files.pythonhosted.org/packages/36/c6/537f22b57e6003904d35d07962dbde2f2e9bdd791d0241da976a4c7f8194/tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc", size = 3568894 }, + { url = "https://files.pythonhosted.org/packages/af/ef/3c1deed14ec59b2c8e7e2fa27b2a53f7d101181277a43b89ab17d891ef2e/tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2", size = 3426873 }, + { url = "https://files.pythonhosted.org/packages/06/db/c0320c4798ac6bd12d2ef895bec9d10d216a3b4d6fff10e9d68883ea7edc/tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe", size = 3965050 }, + { url = "https://files.pythonhosted.org/packages/4c/8a/a166888d6cb14db55f5eb7ce0b1d4777d145aa27cbf4f945712cf6c29935/tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d", size = 4047855 }, + { url = 
"https://files.pythonhosted.org/packages/a7/03/fb50fc03f86016b227a967c8d474f90230c885c0d18f78acdfda7a96ce56/tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa", size = 3608228 }, + { url = "https://files.pythonhosted.org/packages/5b/cd/0385e1026e1e03732fd398e964792a3a8433918b166748c82507e014d748/tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6", size = 9633115 }, + { url = "https://files.pythonhosted.org/packages/25/50/8f8ad0bbdaf09d04b15e6502d1fa1c653754ed7e016e4ae009726aa1a4e4/tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b", size = 9949062 }, + { url = "https://files.pythonhosted.org/packages/db/11/31be66710f1d14526f3588a441efadeb184e1e68458067007b20ead03c59/tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256", size = 2041039 }, + { url = "https://files.pythonhosted.org/packages/65/8e/6d7d72b28f22c422cff8beae10ac3c2e4376b9be721ef8167b7eecd1da62/tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66", size = 2220386 }, + { url = "https://files.pythonhosted.org/packages/63/90/2890cd096898dcdb596ee172cde40c0f54a9cf43b0736aa260a5501252af/tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153", size = 2530580 }, + { url = "https://files.pythonhosted.org/packages/74/d1/f4e1e950adb36675dfd8f9d0f4be644f3f3aaf22a5677a4f5c81282b662e/tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a", size = 2436682 }, + { url = "https://files.pythonhosted.org/packages/ed/30/89b321a16c58d233e301ec15072c0d3ed5014825e72da98604cd3ab2fba1/tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95", size = 3693494 }, + { url = "https://files.pythonhosted.org/packages/05/40/fa899f32de483500fbc78befd378fd7afba4270f17db707d1a78c0a4ddc3/tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266", size = 3566541 }, + { url = "https://files.pythonhosted.org/packages/67/14/e7da32ae5fb4971830f1ef335932fae3fa57e76b537e852f146c850aefdf/tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52", size = 3430792 }, + { url = "https://files.pythonhosted.org/packages/f2/4b/aae61bdb6ab584d2612170801703982ee0e35f8b6adacbeefe5a3b277621/tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f", size = 3962812 }, + { url = "https://files.pythonhosted.org/packages/0a/b6/f7b7ef89c4da7b20256e6eab23d3835f05d1ca8f451d31c16cbfe3cd9eb6/tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840", size = 4024688 }, + { url = 
"https://files.pythonhosted.org/packages/80/54/12047a69f5b382d7ee72044dc89151a2dd0d13b2c9bdcc22654883704d31/tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3", size = 3610961 }, + { url = "https://files.pythonhosted.org/packages/52/b7/1e8a913d18ac28feeda42d4d2d51781874398fb59cd1c1e2653a4b5742ed/tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea", size = 9631367 }, + { url = "https://files.pythonhosted.org/packages/ac/3d/2284f6d99f8f21d09352b88b8cfefa24ab88468d962aeb0aa15c20d76b32/tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c", size = 9950121 }, + { url = "https://files.pythonhosted.org/packages/2a/94/ec3369dbc9b7200c14c8c7a1a04c78b7a7398d0c001e1b7d1ffe30eb93a0/tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57", size = 2044069 }, + { url = "https://files.pythonhosted.org/packages/0c/97/80bff6937e0c67d30c0facacd4f0bcf4254e581aa4995c73cef8c8640e56/tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a", size = 2214527 }, +] + [[package]] name = "tomlkit" version = "0.13.2" @@ -1290,6 +2530,53 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f9/b6/a447b5e4ec71e13871be01ba81f5dfc9d0af7e473da256ff46bc0e24026f/tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde", size = 37955 }, ] +[[package]] +name = "torch" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "sympy" }, + { name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/83/9b7681e41e59adb6c2b042f7e8eb716515665a6eed3dda4215c6b3385b90/torch-2.4.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e743adadd8c8152bb8373543964551a7cb7cc20ba898dc8f9c0cdbe47c283de0", size = 
797262052 }, + { url = "https://files.pythonhosted.org/packages/84/fa/2b510a02809ddd70aed821bc2328c4effd206503df38a1328c9f1f957813/torch-2.4.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:7334325c0292cbd5c2eac085f449bf57d3690932eac37027e193ba775703c9e6", size = 89850473 }, + { url = "https://files.pythonhosted.org/packages/18/cf/f69dff972a748e08e1bf602ef94ea5c6d4dd2f41cea22c8ad67a607d8b41/torch-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:97730014da4c57ffacb3c09298c6ce05400606e890bd7a05008d13dd086e46b1", size = 197860580 }, + { url = "https://files.pythonhosted.org/packages/b7/d0/5e8f96d83889e77b478b90e7d8d24a5fc14c5c9350c6b93d071f45f39096/torch-2.4.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f169b4ea6dc93b3a33319611fcc47dc1406e4dd539844dcbd2dec4c1b96e166d", size = 62144370 }, + { url = "https://files.pythonhosted.org/packages/bf/55/b6c74df4695f94a9c3505021bc2bd662e271d028d055b3b2529f3442a3bd/torch-2.4.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:997084a0f9784d2a89095a6dc67c7925e21bf25dea0b3d069b41195016ccfcbb", size = 797168571 }, + { url = "https://files.pythonhosted.org/packages/9a/5d/327fb72044c22d68a826643abf2e220db3d7f6005a41a6b167af1ffbc708/torch-2.4.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:bc3988e8b36d1e8b998d143255d9408d8c75da4ab6dd0dcfd23b623dfb0f0f57", size = 89746726 }, + { url = "https://files.pythonhosted.org/packages/dc/95/a14dd84ce65e5ce176176393a80b2f74864ee134a31f590140456a4c0959/torch-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:3374128bbf7e62cdaed6c237bfd39809fbcfaa576bee91e904706840c3f2195c", size = 197807123 }, + { url = "https://files.pythonhosted.org/packages/c7/87/489ebb234e75760e06fa4789fa6d4e13c125beefa1483ce35c9e43dcd395/torch-2.4.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:91aaf00bfe1ffa44dc5b52809d9a95129fca10212eca3ac26420eb11727c6288", size = 62123112 }, +] + +[[package]] +name = "tqdm" +version = "4.66.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/83/6ba9844a41128c62e810fddddd72473201f3eacde02046066142a2d96cc5/tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad", size = 169504 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/5d/acf5905c36149bbaec41ccf7f2b68814647347b72075ac0b1fe3022fdc73/tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd", size = 78351 }, +] + [[package]] name = "traitlets" version = "5.14.3" @@ -1300,15 +2587,54 @@ wheels = [ ] [[package]] -name = "typeguard" -version = "4.3.0" +name = "transformers" +version = "4.44.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions" }, + { name = "filelock" }, + { name = "huggingface-hub" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "regex" }, + { name = "requests" }, + { name = "safetensors" }, + { name = "tokenizers" }, + { name = "tqdm" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/a3/81de49357a3c6ac4421d48d9662b53293838f217baf3f3bb9eb55f89fab6/transformers-4.44.2.tar.gz", hash = "sha256:36aa17cc92ee154058e426d951684a2dab48751b35b49437896f898931270826", size = 8110312 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/75/35/07c9879163b603f0e464b0f6e6e628a2340cfc7cdc5ca8e7d52d776710d4/transformers-4.44.2-py3-none-any.whl", hash = 
"sha256:1c02c65e7bfa5e52a634aff3da52138b583fc6f263c1f28d547dc144ba3d412d", size = 9465369 }, +] + +[[package]] +name = "triton" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock", marker = "python_full_version < '3.13'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/3e/a2f59384587eff6aeb7d37b6780de7fedd2214935e27520430ca9f5b7975/triton-3.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ce8520437c602fb633f1324cc3871c47bee3b67acf9756c1a66309b60e3216c", size = 209438883 }, + { url = "https://files.pythonhosted.org/packages/fe/7b/7757205dee3628f75e7991021d15cd1bd0c9b044ca9affe99b50879fc0e1/triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb", size = 209464695 }, +] + +[[package]] +name = "types-aiofiles" +version = "24.1.0.20240626" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/e9/013940b017c313c2e15c64017268fdb0c25e0638621fb8a5d9ebe00fb0f4/types-aiofiles-24.1.0.20240626.tar.gz", hash = "sha256:48604663e24bc2d5038eac05ccc33e75799b0779e93e13d6a8f711ddc306ac08", size = 9357 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/ad/c4b3275d21c5be79487c4f6ed7cd13336997746fe099236cb29256a44a90/types_aiofiles-24.1.0.20240626-py3-none-any.whl", hash = "sha256:7939eca4a8b4f9c6491b6e8ef160caee9a21d32e18534a57d5ed90aee47c66b4", size = 9389 }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8d/e1/3178b3e5369a98239ed7301e3946747048c66f4023163d55918f11b82d4e/typeguard-4.3.0.tar.gz", hash = "sha256:92ee6a0aec9135181eae6067ebd617fd9de8d75d714fb548728a4933b1dea651", size = 73374 } + +[[package]] +name = "types-tqdm" +version = "4.66.0.20240417" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/65/f14716c07d40f51be63cb46d89a71c4c5314bcf501506930b7fa5201ece0/types-tqdm-4.66.0.20240417.tar.gz", hash = "sha256:16dce9ef522ea8d40e4f5b8d84dd8a1166eefc13ceee7a7e158bf0f1a1421a31", size = 11916 } wheels = [ - { url = "https://files.pythonhosted.org/packages/eb/de/be0ba39ee73760bf33329b7c6f95bc67e96593c69c881671e312538e24bb/typeguard-4.3.0-py3-none-any.whl", hash = "sha256:4d24c5b39a117f8a895b9da7a9b3114f04eb63bade45a4492de49b175b6f7dfa", size = 35385 }, + { url = "https://files.pythonhosted.org/packages/d7/dd/39a411628bfdeeac54587aa013a83a446a2ecf8e7e324744b9ba3bf076f3/types_tqdm-4.66.0.20240417-py3-none-any.whl", hash = "sha256:248aef1f9986b7b8c2c12b3cb4399fc17dba0a29e7e3f3f9cd704babb879383d", size = 19163 }, ] [[package]] @@ -1320,6 +2646,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, ] +[[package]] +name = "tzdata" +version = "2024.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/74/5b/e025d02cb3b66b7b76093404392d4b44343c69101cc85f4d180dd5784717/tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd", size = 190559 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/58/f9c9e6be752e9fcb8b6a0ee9fb87e6e7a1f6bcab2cdc73f02bb7ba91ada0/tzdata-2024.1-py2.py3-none-any.whl", hash = 
"sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252", size = 345370 }, +] + [[package]] name = "unidiff" version = "0.7.5" @@ -1350,6 +2685,35 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ca/1c/89ffc63a9605b583d5df2be791a27bc1a42b7c32bab68d3c8f2f73a98cd4/urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", size = 121444 }, ] +[[package]] +name = "usearch" +version = "2.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "tqdm" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/29/fbcda825d7b14f24f3b9b543f88415ddbc5e7fa1f64394064fb0d2ba4d93/usearch-2.15.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0c403f10a1f810f97f628a56a051b62eb03a8312082c3d7e1613742eb9d9ef15", size = 704398 }, + { url = "https://files.pythonhosted.org/packages/60/2d/12eecee36caba60e3ccef2ac753a936bdde2f34d9ae6e1381141547e7ad7/usearch-2.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bf1ab2b68a2a1ae8175e60529b8061839e53a528ea192514ca0880e070d9f8e3", size = 377817 }, + { url = "https://files.pythonhosted.org/packages/3a/62/371dd595b7dc2d2a030f61607cf6c3bb58e31212eebfab0c8207498f8226/usearch-2.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93bb7e650b8990f1105fdf37b628d8b06c212231a74ff2eaa1b0768a8faebeba", size = 364648 }, + { url = "https://files.pythonhosted.org/packages/a3/6c/2946088dacbfa6e7deb8e2f4a659a6a08700c65806130e7875457657fae8/usearch-2.15.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e31ad6ffd8a49b933c4507a980b1e36eca42ca978f615f24e7f495b70c41c939", size = 1268388 }, + { url = "https://files.pythonhosted.org/packages/dd/fb/5500d0d04a9694dbf0dc7d5a425953e5de2b79000f6052fe1432a6f30d11/usearch-2.15.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eebfb4b150cb1d4651996c1a2b49c9576529cf3d2b67b8c7f5f26df9d2eae411", size = 1461215 }, + { url = "https://files.pythonhosted.org/packages/ea/dd/c4c9001a45e174a5c075b0dec76b00fd7e8ad5efbcbd5df4e8ce9b2f09b2/usearch-2.15.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:672ac913a23ef32fa1e3d8a5dc73373c0cc3ddf1609a3964c2f2caae24715f67", size = 2199161 }, + { url = "https://files.pythonhosted.org/packages/c5/94/4c8230720e8400478fc5662a2a87783e0c62f23f338244cd56b9beea721d/usearch-2.15.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b42bb7317a770e975429a2c0bb9abd11e42ea010a4e68a916b7476dd20fd2f62", size = 2330982 }, + { url = "https://files.pythonhosted.org/packages/19/d8/f1b058b1d6fc6cd819d3647d5603233eba17786a61bf9c1221f2213ad9bb/usearch-2.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:bbdc71cc459daaf937d1fbc812b760af950a246df144e0ce833bc51953fcabb7", size = 281856 }, + { url = "https://files.pythonhosted.org/packages/6b/96/7b5e3935604615f8789ce29cb863d337751881ea25962cd90dcf0a436c32/usearch-2.15.1-cp311-cp311-win_arm64.whl", hash = "sha256:50a23312061d90a5eb619fca4fa1636624e8932e622186b955bac4910a3c8b17", size = 261995 }, + { url = "https://files.pythonhosted.org/packages/64/b0/d2caba6577de9ee68361192199fc604264a86d0e08b68d4d12504679e699/usearch-2.15.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c762770548b94e7cae749f1e4357b00d5a9f2ddcd8615c495d6d15bc54d9dce2", size = 710219 }, + { url = "https://files.pythonhosted.org/packages/42/d8/7648c436c39eb90016c5d1001ea2116668f4874312d2affd3819ad3064d2/usearch-2.15.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:cd4d7163aaa972906212d90ba0c128c18418b0a093f7410003b02f9316937035", size = 381559 }, + { url = "https://files.pythonhosted.org/packages/b6/42/daeab4558552784bb231f96c1845498286c3a2f2680fc396697f13ef9262/usearch-2.15.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:61190502a8448b8ceed807adde69a951ee423cae85955486264c3b8b3db9b50e", size = 366311 }, + { url = "https://files.pythonhosted.org/packages/bd/2d/1f773b009cd6b668fc4865ce16628f33061323b5e493880663e4f3ee554c/usearch-2.15.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa324fb6a086f44e53a1ef480e3857d8a6db948e829916767af27c32a6b8c33f", size = 1269633 }, + { url = "https://files.pythonhosted.org/packages/bb/4c/dbd2f86ebb83393a47fd966b490840cea94f39e82c9f4e2e9ccb3c7284f6/usearch-2.15.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:8cb9d0fff2b73c23b1344ed75d382af8b0f426b85db2f2a6a80816af8a7720d4", size = 1465092 }, + { url = "https://files.pythonhosted.org/packages/82/e4/c5f2842da05b3d9e37b72bcee7d91e860871eeba60a2f0f2418651d94938/usearch-2.15.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:93134604a1dbefd2b83b9eb9a71f80d9a9aac1db94c3c5cfd18ecc6cff1a4d44", size = 2198308 }, + { url = "https://files.pythonhosted.org/packages/69/41/c4905f3e9c5bb72ed88e99ac0358e3d0d69b486378d6194ecc555825ce94/usearch-2.15.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2e961ef7e5b408d9aa3706f945bef7eda36a3d72411b79118101ff7c722df987", size = 2334937 }, + { url = "https://files.pythonhosted.org/packages/f8/23/08054d4cb19ea4c59731dc72176a65723951b9745ac281b745423f758a57/usearch-2.15.1-cp312-cp312-win_amd64.whl", hash = "sha256:4ed5c05f460c4202d9e13f173b3c0895f4195f1107c2671d23dfc53b4e7a0007", size = 283118 }, + { url = "https://files.pythonhosted.org/packages/24/ff/0e5232ed5c539afc0ce1c07e7fc046a01d4769d601e0fe30d83458efa447/usearch-2.15.1-cp312-cp312-win_arm64.whl", hash = "sha256:d32e1cd5624efe64c735f70f34ff969a628ccee964f213d58c6fb8bab1d6244e", size = 262874 }, +] + [[package]] name = "virtualenv" version = "20.26.3" @@ -1364,6 +2728,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/07/4d/410156100224c5e2f0011d435e477b57aed9576fc7fe137abcf14ec16e11/virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589", size = 5684792 }, ] +[[package]] +name = "wandb" +version = "0.17.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "docker-pycreds" }, + { name = "gitpython" }, + { name = "platformdirs" }, + { name = "protobuf" }, + { name = "psutil" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sentry-sdk" }, + { name = "setproctitle" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/60/ef06623bc0f5a3153614d6991e925d6b272466d2b4a1ba9a384019ba690b/wandb-0.17.8.tar.gz", hash = "sha256:d3d0ae27e85366d8ed48e79873d409eb43ad5fa43792506a6240b875b1d44c87", size = 6145459 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/3c/3aa5d7459fbf363732bf7ce2d292fcdcf64e883780213fd753e2081d59b5/wandb-0.17.8-py3-none-any.whl", hash = "sha256:0e240d9e92c2557fba8415266ee6e124420cb80353e40d702a597f3cb609fad6", size = 5078248 }, + { url = "https://files.pythonhosted.org/packages/e7/01/1a64871641a1287eebea7af919d924c1aa309022ebadbabe008bbddc4c8e/wandb-0.17.8-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:a1f8a032776bea9a9aec9c6c3671142a31ed962cc40a20988805cedea57fc16c", size = 6919649 }, + { url = 
"https://files.pythonhosted.org/packages/5f/5c/4ab25df5728672064a59a4498d3f92ec077e0694b6c77d73cedec441df45/wandb-0.17.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c6e60534f21e9a322df6e9ebc3e4188d06ed3413985828130508f06c2393116e", size = 6649713 }, + { url = "https://files.pythonhosted.org/packages/8a/8e/6d0fb856cde3298f00158f4ad6e497fa4a3d5a765faea4e7b940d4682ed8/wandb-0.17.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e0edcb0eee9a392a7115d349e790c8df10ae2d488e525ace2f8d1589ddda6de", size = 9026454 }, + { url = "https://files.pythonhosted.org/packages/5a/5b/6ebfb97134918bac6172b64f61ccac51464f8e8f2fb91c35845941733163/wandb-0.17.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1762ecc98c38d7a040531d0a01e5090efcaf594ebac87d6929316884828c6393", size = 9386001 }, + { url = "https://files.pythonhosted.org/packages/04/1e/5097d25aaec1d4148692afbc22abca7cc0c966bc67259fa42257e10ef512/wandb-0.17.8-py3-none-win32.whl", hash = "sha256:200ee7c887181db2c879be0d5f0ee6a1d6199ea97b7a2dbca73dcedf5a4cfd32", size = 6558073 }, + { url = "https://files.pythonhosted.org/packages/a9/16/73bdedc32daf7675a81d3d191975deefe551302d720cbc5f7b247bf7a9b2/wandb-0.17.8-py3-none-win_amd64.whl", hash = "sha256:325ce529e3af7dc9eaea889ba2c2d9af7e19a761136300ae5a4c1b5df0c9f02d", size = 6558077 }, +] + [[package]] name = "wcwidth" version = "0.2.13" @@ -1381,3 +2773,70 @@ sdist = { url = "https://files.pythonhosted.org/packages/72/b2/e3edc608823348e62 wheels = [ { url = "https://files.pythonhosted.org/packages/33/6b/0dc75b64a764ea1cb8e4c32d1fb273c147304d4e5483cd58be482dc62e45/xmod-1.8.1-py3-none-any.whl", hash = "sha256:a24e9458a4853489042522bdca9e50ee2eac5ab75c809a91150a8a7f40670d48", size = 4610 }, ] + +[[package]] +name = "yarl" +version = "1.9.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/50/dcf6d0ea0da893b23f73ea5b21fa1f96fd45e9cb4404cc6b665368b4ab19/yarl-1.9.7.tar.gz", hash = "sha256:f28e602edeeec01fc96daf7728e8052bc2e12a672e2a138561a1ebaf30fd9df7", size = 153261 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/26/99c3331253bf9d906d8292aba292d5801cef75537098a47027431746e43c/yarl-1.9.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:596069ddeaf72b5eb36cd714dcd2b5751d0090d05a8d65113b582ed9e1c801fb", size = 188476 }, + { url = "https://files.pythonhosted.org/packages/dc/f2/789992d30e2b6c9a6460e1da4d59ec1d5c91aa624b97788cce4ce83f9a54/yarl-1.9.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cb870907e8b86b2f32541403da9455afc1e535ce483e579bea0e6e79a0cc751c", size = 112728 }, + { url = "https://files.pythonhosted.org/packages/f2/d2/95e5686881ac6e864d6edb18d92e5cc80a18d1b4ddcbc84f2f0d3b451da9/yarl-1.9.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ca5e86be84492fa403c4dcd4dcaf8e1b1c4ffc747b5176f7c3d09878c45719b0", size = 110853 }, + { url = "https://files.pythonhosted.org/packages/d7/cc/773c741e3fe0f9d38c87e1faacc4c5864dbe840fcc46e889482678b2891f/yarl-1.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a99cecfb51c84d00132db909e83ae388793ca86e48df7ae57f1be0beab0dcce5", size = 504153 }, + { url = "https://files.pythonhosted.org/packages/a5/a4/7fabbd75113591b6cbab9a71e3a2e3c0170fb7958afa3a399e8ed9968023/yarl-1.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25508739e9b44d251172145f54c084b71747b09e4d237dc2abb045f46c36a66e", size = 525561 
}, + { url = "https://files.pythonhosted.org/packages/52/01/2aabad17c2549e7068f8ec855e9de8d56d95a3a0823e3a398ea17582c6a0/yarl-1.9.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:60f3b5aec3146b6992640592856414870f5b20eb688c1f1d5f7ac010a7f86561", size = 520140 }, + { url = "https://files.pythonhosted.org/packages/46/c9/68d4f410c24a10fe0a9d0ab6fa975da433329cd611f12cf11f35e625c60d/yarl-1.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1557456afce5db3d655b5f8a31cdcaae1f47e57958760525c44b76e812b4987", size = 508027 }, + { url = "https://files.pythonhosted.org/packages/f8/87/107ac4245975b7ef5d65b99375fa9e1e71bbe66974a4d2c4a6e9375c77d2/yarl-1.9.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71bb1435a84688ed831220c5305d96161beb65cac4a966374475348aa3de4575", size = 489310 }, + { url = "https://files.pythonhosted.org/packages/c0/81/c6e44b3b41227b44337d9bbd0a585b79e3ce12e75500b6470b1b0ea2b5a2/yarl-1.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f87d8645a7a806ec8f66aac5e3b1dcb5014849ff53ffe2a1f0b86ca813f534c7", size = 504009 }, + { url = "https://files.pythonhosted.org/packages/36/07/272c6c0d36c13a7fd62d2ae3850a70abe1de54afc503a2eb3d61fd083b3e/yarl-1.9.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:58e3f01673873b8573da3abe138debc63e4e68541b2104a55df4c10c129513a4", size = 502250 }, + { url = "https://files.pythonhosted.org/packages/d3/23/a26969785e3639b73c796061deb4e3d12df8813bf6f8a51483ab57039b6d/yarl-1.9.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8af0bbd4d84f8abdd9b11be9488e32c76b1501889b73c9e2292a15fb925b378b", size = 533891 }, + { url = "https://files.pythonhosted.org/packages/d7/f0/a73047a29b643869ff3f8af51d400763546c1ac98fa7b8eab5c0d12905dd/yarl-1.9.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:7fc441408ed0d9c6d2d627a02e281c21f5de43eb5209c16636a17fc704f7d0f8", size = 537621 }, + { url = "https://files.pythonhosted.org/packages/be/ff/531ce46088bd8834bedf09cb2252b3af3d1f8a4b79f4c4e5d8c5480caa51/yarl-1.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a9552367dc440870556da47bb289a806f08ad06fbc4054072d193d9e5dd619ba", size = 519177 }, + { url = "https://files.pythonhosted.org/packages/cd/71/28e058729ea289903a19169dda80944fb13704f06e03defb4be97e97f7b3/yarl-1.9.7-cp311-cp311-win32.whl", hash = "sha256:628619008680a11d07243391271b46f07f13b75deb9fe92ef342305058c70722", size = 98901 }, + { url = "https://files.pythonhosted.org/packages/7e/bd/1ddd698d8307f35c71a6ee5e3dffeeff0f9e95d926ed9444a0df23a39d5e/yarl-1.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:bc23d870864971c8455cfba17498ccefa53a5719ea9f5fce5e7e9c1606b5755f", size = 108645 }, + { url = "https://files.pythonhosted.org/packages/dd/4e/b3d7679b158a981e6fa36c1d4388a7c3f4adb1b5c33ec22708ec550ddf91/yarl-1.9.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d8cf3d0b67996edc11957aece3fbce4c224d0451c7c3d6154ec3a35d0e55f6b", size = 188957 }, + { url = "https://files.pythonhosted.org/packages/0e/7b/2fe90636cf0f745210bcb79347369a3882e829e1070ab7d8b3949684d209/yarl-1.9.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3a7748cd66fef49c877e59503e0cc76179caf1158d1080228e67e1db14554f08", size = 113292 }, + { url = "https://files.pythonhosted.org/packages/e2/ee/fae90e40bb4c2af6fd8a1a50d052140101a6634f1d2b32596a6cf53f4244/yarl-1.9.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a6fa3aeca8efabb0fbbb3b15e0956b0cb77f7d9db67c107503c30af07cd9e00", size = 110974 }, + { url = 
"https://files.pythonhosted.org/packages/28/60/3e985358440d6467c2ea81673000aef762c448462ab88e98e6676845f24f/yarl-1.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf37dd0008e5ac5c3880198976063c491b6a15b288d150d12833248cf2003acb", size = 504133 }, + { url = "https://files.pythonhosted.org/packages/e3/ae/f0730026d7011f5403a8f49fec4e666358db43a0339dc8259b19a7c2e6f1/yarl-1.9.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87aa5308482f248f8c3bd9311cd6c7dfd98ea1a8e57e35fb11e4adcac3066003", size = 521269 }, + { url = "https://files.pythonhosted.org/packages/24/5d/1b982866e45906f236cb9a93ec9a07a5b61854b34f0f6fa368056ee9cda3/yarl-1.9.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:867b13c1b361f9ba5d2f84dc5408082f5d744c83f66de45edc2b96793a9c5e48", size = 518617 }, + { url = "https://files.pythonhosted.org/packages/8c/ec/eaab7e272ddf1eab39b793e5cd3af304ac28d9342f6a3f2e356276bcc4fe/yarl-1.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48ce93947554c2c85fe97fc4866646ec90840bc1162e4db349b37d692a811755", size = 510893 }, + { url = "https://files.pythonhosted.org/packages/98/c3/ed093752106c61e3b2a108f798649cb24119484802bb5ca521a36cf559bd/yarl-1.9.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcd3d94b848cba132f39a5b40d80b0847d001a91a6f35a2204505cdd46afe1b2", size = 487621 }, + { url = "https://files.pythonhosted.org/packages/71/ff/bce0bda27957d4f8cdb8e56b807f185683e8b6a3717637fb8d1faa39269d/yarl-1.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d06d6a8f98dd87646d98f0c468be14b201e47ec6092ad569adf835810ad0dffb", size = 506332 }, + { url = "https://files.pythonhosted.org/packages/ff/b5/95702c9719808331d2401e13660af86b323139f0293feb3a44698a194439/yarl-1.9.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:91567ff4fce73d2e7ac67ed5983ad26ba2343bc28cb22e1e1184a9677df98d7c", size = 505515 }, + { url = "https://files.pythonhosted.org/packages/17/7d/74a41e5d49329be134602a7e840adf3a499c7562afb982282d079067d5e4/yarl-1.9.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1d5594512541e63188fea640b7f066c218d2176203d6e6f82abf702ae3dca3b2", size = 528662 }, + { url = "https://files.pythonhosted.org/packages/c1/f9/8a9083b6b73944c0bb5c99cfc0edf3bec14456b621f76b338fc945afc69f/yarl-1.9.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c2743e43183e4afbb07d5605693299b8756baff0b086c25236c761feb0e3c56", size = 539801 }, + { url = "https://files.pythonhosted.org/packages/44/74/877076885263c214abbed93462ef2e4e95579c047d188530c849ea207846/yarl-1.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:daa69a3a2204355af39f4cfe7f3870d87c53d77a597b5100b97e3faa9460428b", size = 524783 }, + { url = "https://files.pythonhosted.org/packages/95/58/e509c4ad1460bce6cf5cd485c5baa5c4c6a9a53999a82f90462f7908ee26/yarl-1.9.7-cp312-cp312-win32.whl", hash = "sha256:36b16884336c15adf79a4bf1d592e0c1ffdb036a760e36a1361565b66785ec6c", size = 98822 }, + { url = "https://files.pythonhosted.org/packages/b0/71/c8136c8c240ccf9d38715aaad31fb4f2c2f14e83c6db6b83d389274b0e9e/yarl-1.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:2ead2f87a1174963cc406d18ac93d731fbb190633d3995fa052d10cefae69ed8", size = 108657 }, + { url = "https://files.pythonhosted.org/packages/2b/3c/c159233854485307e3355af11099d9c351c8475b10b2b3dc64bb8cdc608b/yarl-1.9.7-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:808eddabcb6f7b2cdb6929b3e021ac824a2c07dc7bc83f7618e18438b1b65781", size = 185789 }, + { url = "https://files.pythonhosted.org/packages/89/f3/24c3b30a9d95827280130ecb6ef33f0ab2bdc690391f19178493cf149196/yarl-1.9.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:395ab0d8ce6d104a988da429bcbfd445e03fb4c911148dfd523f69d13f772e47", size = 111661 }, + { url = "https://files.pythonhosted.org/packages/5c/a1/e610bfb3c74efdbeeff19ee370e6a76dd552c66680a9180777828dc2e7fa/yarl-1.9.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:49827dfccbd59c4499605c13805e947349295466e490860a855b7c7e82ec9c75", size = 109560 }, + { url = "https://files.pythonhosted.org/packages/d0/aa/dc3657bcf79cd98bdfa03c1b85c88de2a36037171894fa56890146e08615/yarl-1.9.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6b8bbdd425d0978311520ea99fb6c0e9e04e64aee84fac05f3157ace9f81b05", size = 485985 }, + { url = "https://files.pythonhosted.org/packages/cf/2a/a69ad3ae4facef03df228790e6f4cfd4971cc267ee140fc8f6331c7e6194/yarl-1.9.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71d33fd1c219b5b28ee98cd76da0c9398a4ed4792fd75c94135237db05ba5ca8", size = 501335 }, + { url = "https://files.pythonhosted.org/packages/f5/8f/b0a35ecd3f31fdffa704d11bf452a277ce4b29e7b878a94e636349945d87/yarl-1.9.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62440431741d0b7d410e5cbad800885e3289048140a43390ecab4f0b96dde3bb", size = 502295 }, + { url = "https://files.pythonhosted.org/packages/39/68/bfc953df3a6ee6c0c9cd9f84d488681e377e49b8ced6d2d5b9289d639c89/yarl-1.9.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db97210433366dfba55590e48285b89ad0146c52bf248dd0da492dd9f0f72cf", size = 493550 }, + { url = "https://files.pythonhosted.org/packages/40/5d/5092b93da54659f1e737f95d9a554f79aa68d1fda05e26c9f0e86184d894/yarl-1.9.7-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:653597b615809f2e5f4dba6cd805608b6fd3597128361a22cc612cf7c7a4d1bf", size = 470835 }, + { url = "https://files.pythonhosted.org/packages/66/e6/dedce99c469f8d1c66432006910adf943fdb7d0cfb77f026030e234f234f/yarl-1.9.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:df47612129e66f7ce7c9994d4cd4e6852f6e3bf97699375d86991481796eeec8", size = 490437 }, + { url = "https://files.pythonhosted.org/packages/50/52/b36cd8d9356734fda4a668ce358f56ecb16c7171b8a658fdfde31476de7c/yarl-1.9.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5e338b6febbae6c9fe86924bac3ea9c1944e33255c249543cd82a4af6df6047b", size = 492765 }, + { url = "https://files.pythonhosted.org/packages/aa/9e/6ad4300fc040fc34e323f1254a05886a6441d05bd251a9a4063ed8d35c32/yarl-1.9.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e649d37d04665dddb90994bbf0034331b6c14144cc6f3fbce400dc5f28dc05b7", size = 508901 }, + { url = "https://files.pythonhosted.org/packages/77/cc/8b27ea0a0faaba43b389c3a170c25a1fc063c34ae41c8660055e47d5dc89/yarl-1.9.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0a1b8fd849567be56342e988e72c9d28bd3c77b9296c38b9b42d2fe4813c9d3f", size = 519676 }, + { url = "https://files.pythonhosted.org/packages/ca/e6/1b88c9b952c69b4bfb5d38260de4bf65eab4d0787bfcdc0a4d680e084ccb/yarl-1.9.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f9d715b2175dff9a49c6dafdc2ab3f04850ba2f3d4a77f69a5a1786b057a9d45", size = 510005 }, + { url = 
"https://files.pythonhosted.org/packages/72/81/c456d5060bf4c2cb1213cd71e9211e0859ce6fff444bd13c61e2ae681b64/yarl-1.9.7-cp313-cp313-win32.whl", hash = "sha256:bc9233638b07c2e4a3a14bef70f53983389bffa9e8cb90a2da3f67ac9c5e1842", size = 483263 }, + { url = "https://files.pythonhosted.org/packages/5d/08/fe455390603d0377140c1ef02287dd32d3d4a0a6d596aa4a1fc881ac68d2/yarl-1.9.7-cp313-cp313-win_amd64.whl", hash = "sha256:62e110772330d7116f91e79cd83fef92545cb2f36414c95881477aa01971f75f", size = 491236 }, + { url = "https://files.pythonhosted.org/packages/48/04/8cc40203453e4bce05cd3e9a5bea930ac0086aa4848a9c41aa1da13ae1a0/yarl-1.9.7-py3-none-any.whl", hash = "sha256:49935cc51d272264358962d050d726c3e5603a616f53e52ea88e9df1728aa2ee", size = 35402 }, +] + +[[package]] +name = "zipp" +version = "3.20.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d3/8b/1239a3ef43a0d0ebdca623fb6413bc7702c321400c5fdd574f0b7aa0fbb4/zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b", size = 23848 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/9e/c96f7a4cd0bf5625bb409b7e61e99b1130dc63a98cb8b24aeabae62d43e8/zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064", size = 8988 }, +] From 63bf901956301720eceb62277eba521111d3b439 Mon Sep 17 00:00:00 2001 From: James Braza Date: Tue, 3 Sep 2024 15:46:06 -0700 Subject: [PATCH 3/5] Propagated first pass of the repo --- ldp/__init__.py | 0 ldp/agent/__init__.py | 33 ++ ldp/agent/agent.py | 168 ++++++ ldp/agent/agent_client.py | 147 +++++ ldp/agent/dqn_agent.py | 154 +++++ ldp/agent/memory_agent.py | 100 ++++ ldp/agent/react_agent.py | 140 +++++ ldp/agent/simple_agent.py | 102 ++++ ldp/agent/tree_of_thoughts_agent.py | 142 +++++ ldp/alg/__init__.py | 0 ldp/alg/algorithms.py | 167 ++++++ ldp/alg/beam_search.py | 218 +++++++ ldp/alg/callbacks.py | 377 ++++++++++++ ldp/alg/datasets.py | 11 + ldp/alg/optimizer/__init__.py | 96 ++++ ldp/alg/optimizer/ape.py | 297 ++++++++++ ldp/alg/optimizer/dqn.py | 492 ++++++++++++++++ ldp/alg/optimizer/memory.py | 108 ++++ ldp/alg/optimizer/openai_sft_optimizer.py | 309 ++++++++++ ldp/alg/optimizer/opt.py | 50 ++ ldp/alg/optimizer/replay_buffers.py | 34 ++ ldp/alg/rollout.py | 309 ++++++++++ ldp/alg/runners.py | 313 ++++++++++ ldp/alg/tree_search.py | 149 +++++ ldp/data_structures.py | 112 ++++ ldp/graph/__init__.py | 0 ldp/graph/async_torch.py | 222 ++++++++ ldp/graph/common_ops.py | 370 ++++++++++++ ldp/graph/gradient_estimators.py | 144 +++++ ldp/graph/memory.py | 146 +++++ ldp/graph/modules/__init__.py | 31 + ldp/graph/modules/llm_call.py | 30 + ldp/graph/modules/react.py | 272 +++++++++ ldp/graph/modules/reflect.py | 59 ++ ldp/graph/modules/thought.py | 44 ++ ldp/graph/modules/value_function.py | 366 ++++++++++++ ldp/graph/op_utils.py | 148 +++++ ldp/graph/ops.py | 513 +++++++++++++++++ ldp/graph/pydantic_patch.py | 50 ++ ldp/graph/torch_ops.py | 137 +++++ ldp/llms/__init__.py | 41 ++ ldp/llms/chat.py | 351 ++++++++++++ ldp/llms/embeddings.py | 134 +++++ ldp/llms/prompts.py | 100 ++++ ldp/py.typed | 0 tests/.gitignore | 1 + tests/__init__.py | 8 + tests/conftest.py | 30 + tests/test_agents.py | 662 ++++++++++++++++++++++ tests/test_algorithms.py | 34 ++ tests/test_buffers.py | 22 + tests/test_context_managers.py | 138 +++++ tests/test_embeddings.py | 138 +++++ tests/test_envs.py | 154 +++++ tests/test_gradients.py | 379 +++++++++++++ tests/test_llms.py | 338 
+++++++++++
 tests/test_memory.py                      |  47 ++
 tests/test_modules.py                     | 454 +++++++++++++++
 tests/test_ops.py                         | 338 +++++++++++
 tests/test_optimizer.py                   | 661 +++++++++++++++++++++
 tests/test_prompts.py                     |  21 +
 tests/test_rollouts.py                    | 268 +++++++++
 tests/test_runners.py                     | 141 +++++
 tests/test_torch_ops.py                   | 326 +++++++++++
 64 files changed, 11346 insertions(+)
 create mode 100644 ldp/__init__.py
 create mode 100644 ldp/agent/__init__.py
 create mode 100644 ldp/agent/agent.py
 create mode 100644 ldp/agent/agent_client.py
 create mode 100644 ldp/agent/dqn_agent.py
 create mode 100644 ldp/agent/memory_agent.py
 create mode 100644 ldp/agent/react_agent.py
 create mode 100644 ldp/agent/simple_agent.py
 create mode 100644 ldp/agent/tree_of_thoughts_agent.py
 create mode 100644 ldp/alg/__init__.py
 create mode 100644 ldp/alg/algorithms.py
 create mode 100644 ldp/alg/beam_search.py
 create mode 100644 ldp/alg/callbacks.py
 create mode 100644 ldp/alg/datasets.py
 create mode 100644 ldp/alg/optimizer/__init__.py
 create mode 100644 ldp/alg/optimizer/ape.py
 create mode 100644 ldp/alg/optimizer/dqn.py
 create mode 100644 ldp/alg/optimizer/memory.py
 create mode 100644 ldp/alg/optimizer/openai_sft_optimizer.py
 create mode 100644 ldp/alg/optimizer/opt.py
 create mode 100644 ldp/alg/optimizer/replay_buffers.py
 create mode 100644 ldp/alg/rollout.py
 create mode 100644 ldp/alg/runners.py
 create mode 100644 ldp/alg/tree_search.py
 create mode 100644 ldp/data_structures.py
 create mode 100644 ldp/graph/__init__.py
 create mode 100644 ldp/graph/async_torch.py
 create mode 100644 ldp/graph/common_ops.py
 create mode 100644 ldp/graph/gradient_estimators.py
 create mode 100644 ldp/graph/memory.py
 create mode 100644 ldp/graph/modules/__init__.py
 create mode 100644 ldp/graph/modules/llm_call.py
 create mode 100644 ldp/graph/modules/react.py
 create mode 100644 ldp/graph/modules/reflect.py
 create mode 100644 ldp/graph/modules/thought.py
 create mode 100644 ldp/graph/modules/value_function.py
 create mode 100644 ldp/graph/op_utils.py
 create mode 100644 ldp/graph/ops.py
 create mode 100644 ldp/graph/pydantic_patch.py
 create mode 100644 ldp/graph/torch_ops.py
 create mode 100644 ldp/llms/__init__.py
 create mode 100644 ldp/llms/chat.py
 create mode 100644 ldp/llms/embeddings.py
 create mode 100644 ldp/llms/prompts.py
 create mode 100644 ldp/py.typed
 create mode 100644 tests/.gitignore
 create mode 100644 tests/__init__.py
 create mode 100644 tests/conftest.py
 create mode 100644 tests/test_agents.py
 create mode 100644 tests/test_algorithms.py
 create mode 100644 tests/test_buffers.py
 create mode 100644 tests/test_context_managers.py
 create mode 100644 tests/test_embeddings.py
 create mode 100644 tests/test_envs.py
 create mode 100644 tests/test_gradients.py
 create mode 100644 tests/test_llms.py
 create mode 100644 tests/test_memory.py
 create mode 100644 tests/test_modules.py
 create mode 100644 tests/test_ops.py
 create mode 100644 tests/test_optimizer.py
 create mode 100644 tests/test_prompts.py
 create mode 100644 tests/test_rollouts.py
 create mode 100644 tests/test_runners.py
 create mode 100644 tests/test_torch_ops.py

diff --git a/ldp/__init__.py b/ldp/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ldp/agent/__init__.py b/ldp/agent/__init__.py
new file mode 100644
index 00000000..02584a76
--- /dev/null
+++ b/ldp/agent/__init__.py
@@ -0,0 +1,33 @@
+from enum import StrEnum
+
+
+class DefaultLLMModelNames(StrEnum):
+    """Defaults for LLM models, pin exact versions for performance stability."""
+
+    OPENAI = "gpt-4o-2024-08-06"  # Cheap, fast, and decent
+
+
+# ruff: noqa: E402  # Avoid circular imports
+
+from .agent import Agent, AgentConfig
+from .agent_client import HTTPAgentClient, make_simple_agent_server
+from .dqn_agent import DQNAgent, MultipleCompletionLLMCallOp
+from .memory_agent import MemoryAgent
+from .react_agent import ReActAgent
+from .simple_agent import SimpleAgent, SimpleAgentState
+from .tree_of_thoughts_agent import TreeofThoughtsAgent
+
+__all__ = [
+    "Agent",
+    "AgentConfig",
+    "DQNAgent",
+    "DefaultLLMModelNames",
+    "HTTPAgentClient",
+    "MemoryAgent",
+    "MultipleCompletionLLMCallOp",
+    "ReActAgent",
+    "SimpleAgent",
+    "SimpleAgentState",
+    "TreeofThoughtsAgent",
+    "make_simple_agent_server",
+]
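A note on how these exports compose: every Agent subclass registers itself by class name (see _AGENT_REGISTRY in ldp/agent/agent.py below), so AgentConfig can construct agents from a string name. A minimal sketch, assuming SimpleAgent's defaults suffice (agent_kwargs left empty here for brevity):

    from ldp.agent import AgentConfig

    config = AgentConfig(agent_type="SimpleAgent", agent_kwargs={})
    agent = config.construct_agent()  # looks up "SimpleAgent" in the registry
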
+ """ + + @abstractmethod + async def init_state(self, tools: list[Tool]) -> TAgentState: + """Initializes the first agent state with the provided tools.""" + + def named_ops(self) -> Iterable[tuple[str, Op]]: + """Analogous to torch.nn.Module.named_parameters().""" + return _find_ops(self) + + +class AgentConfig(BaseModel): + """Configuration for specifying the type of agent i.e. the subclass of Agent above.""" + + model_config = ConfigDict(extra="forbid") + + agent_type: str = Field( + description="The type of agent to be used. " + "This should be a subclass of Agent above.", + ) + agent_kwargs: dict[str, JsonValue] = Field( + default_factory=dict, + description="Keyword arguments to pass to the agent's constructor.", + ) + + def construct_agent(self) -> Agent: + return _AGENT_REGISTRY[self.agent_type](**self.agent_kwargs) + + def __hash__(self) -> int: + return hash(self.agent_type + json.dumps(self.agent_kwargs, sort_keys=True)) + + +def _find_ops( # noqa: C901 + root: object, root_name: str = "", visited: set[int] | None = None +) -> Iterable[tuple[str, Op]]: + """Recursive function to find children that are Ops and the attr chain to reach them. + + E.g. if root.module.op is an Op, then we will yield ("module.op", root.module.op). + These are not fully qualified names, but more like "locally qualified names". In the above + example, "root." + "module.op" is the fully qualified name. + This is an internal function - Agent.named_ops() should usually suffice. + + Args: + root: Any object that might hold Ops. + root_name: The name of the root object. Defaults to empty string and is passed as an arg to + make this method recursive. + visited: a set of visited node IDs to avoid loops. Defaults to None. + + Yields: + Two-tuple of (locally qualified name, Op) pairs + """ + # Recursive function to find children that are Ops and the + # attribute chain to reach them. + if visited is None: + visited = set() + + if isinstance(root, Op): + yield root_name, root + # Assume an Op may not have sub-Ops. I think this is sound, since + # we wouldn't be tracking the compute graph properly if it did. + return + + if "__pydantic_parent_namespace__" in root_name: + # Skip Pydantic internals + return + + # Don't recurse into PyTorch objects because they won't contain Ops + if torch is not None and ( # type: ignore[redundant-expr] + isinstance(root, torch.Tensor | torch.nn.Module) + ): + return + + # Similarly for numpy + if isinstance(root, np.ndarray): + return + + # loop through 3 types of containers: dicts, collections, and objects + if isinstance(root, Mapping): + named_attrs: Any = root.items() + elif isinstance(root, Sequence | Collection) and not isinstance(root, str | bytes): + named_attrs = enumerate(root) + elif hasattr(root, "__dict__"): + # object? + named_attrs = root.__dict__.items() + else: + # couldn't descend + return + + for k, v in named_attrs: + id_v = id(v) + if id_v not in visited: + # only visit each object once - avoid loops, etc. 
diff --git a/ldp/agent/agent_client.py b/ldp/agent/agent_client.py
new file mode 100644
index 00000000..3a385d45
--- /dev/null
+++ b/ldp/agent/agent_client.py
@@ -0,0 +1,147 @@
+import os
+import secrets
+from typing import TYPE_CHECKING, Annotated, TypeVar
+
+import httpx
+from aviary.message import Message
+from aviary.tools import Messages, Tool, ToolRequestMessage, ToolsAdapter
+from pydantic import BaseModel
+
+from ldp.graph.op_utils import get_training_mode
+from ldp.graph.ops import OpResult
+
+from .agent import Agent
+from .simple_agent import SimpleAgentState
+
+if TYPE_CHECKING:
+    from fastapi import FastAPI
+
+TSerializableAgentState = TypeVar("TSerializableAgentState", bound=BaseModel)
+
+
+class HTTPAgentClient(Agent[TSerializableAgentState]):
+    """Interact with an Agent running in a server via POST requests."""
+
+    def __init__(
+        self,
+        agent_state_type: type[TSerializableAgentState],
+        server_url: str,
+        request_headers: httpx._types.HeaderTypes | None = None,
+        request_timeout: float | None = None,
+    ):
+        super().__init__()
+        self._agent_state_type = agent_state_type
+        self._request_url = server_url
+        self._request_headers = request_headers
+        self._request_timeout = request_timeout
+
+    async def get_asv(
+        self,
+        agent_state: TSerializableAgentState,
+        obs: list[Message],
+    ) -> tuple[OpResult[ToolRequestMessage], TSerializableAgentState, float]:
+        async with httpx.AsyncClient() as client:
+            response = await client.post(
+                f"{self._request_url}/get_asv",
+                json={
+                    "agent_state": agent_state.model_dump(),
+                    "obs": [m.model_dump() for m in obs],
+                    "training": get_training_mode(),
+                },
+                headers=self._request_headers,
+                timeout=self._request_timeout,
+            )
+            response.raise_for_status()
+            response_data = response.json()
+        return (
+            OpResult[ToolRequestMessage](**response_data[0]),
+            self._agent_state_type(**response_data[1]),
+            response_data[2],
+        )
+
+    async def init_state(self, tools: list[Tool]) -> TSerializableAgentState:
+        async with httpx.AsyncClient() as client:
+            response = await client.post(
+                f"{self._request_url}/init_state",
+                json=ToolsAdapter.dump_python(tools),
+                headers=self._request_headers,
+                timeout=self._request_timeout,
+            )
+            response.raise_for_status()
+        return self._agent_state_type(**response.json())
+
+
+def make_simple_agent_server(
+    agent: Agent[SimpleAgentState], render_docs: bool = False
+) -> "FastAPI":
+    """
+    Make a FastAPI app designed to work with the above HTTPAgentClient.
+
+    Here's how this works:
+    1. There is an entity orchestrating an Agent's interactions with an Environment.
+       A simple example of this is an integration test that sequentially calls
+       Agent.get_asv and Environment.step.
+    2. That entity is given the above HTTPAgentClient. Any Agent.init_state or
+       Agent.get_asv calls the orchestration entity makes are actually
+       POST requests under the hood. The agent's "brains" aren't local.
+    3. Remotely, this server code is running, and is where the actual Agent logic lives.
+       An example of this is a remote server containing GPU(s).
+    """
+    try:
+        from fastapi import Body, Depends, FastAPI, HTTPException, status
+        from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+    except ModuleNotFoundError as exc:
+        raise ImportError(
+            "Please install aviary with the 'server' extra like so:"
+            " `pip install aviary[server]`."
+        ) from exc
+
+    asgi_app = FastAPI(
+        title=f"aviary.Agent {type(agent).__name__}",
+        description="Serve inference endpoints for an aviary.Agent with a SimpleAgentState",
+        # Only render Swagger docs if local since we don't have a login here
+        docs_url="/docs" if render_docs else None,
+        redoc_url="/redoc" if render_docs else None,
+    )
+    auth_scheme = HTTPBearer()
+
+    async def validate_token(
+        token: Annotated[HTTPAuthorizationCredentials, Depends(auth_scheme)],
+    ) -> HTTPAuthorizationCredentials:
+        # NOTE: don't use os.environ.get() to avoid possible empty string matches, and
+        # to have clearer server failures if the AUTH_TOKEN env var isn't present
+        if not secrets.compare_digest(token.credentials, os.environ["AUTH_TOKEN"]):
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Incorrect bearer token",
+                headers={"WWW-Authenticate": "Bearer"},
+            )
+        return token
+
+    @asgi_app.get("/info")
+    def info(
+        _: Annotated[HTTPAuthorizationCredentials, Depends(validate_token)],
+    ) -> dict[str, str]:
+        """Get agent metadata, useful for debugging."""
+        return {"agent_type": type(agent).__name__}
+
+    @asgi_app.post("/get_asv")
+    async def get_asv(
+        agent_state: SimpleAgentState,
+        obs: Messages,
+        _: Annotated[HTTPAuthorizationCredentials, Depends(validate_token)],
+        training: Annotated[bool, Body()] = True,
+    ) -> tuple[OpResult[ToolRequestMessage], SimpleAgentState, float]:
+        if training:
+            raise NotImplementedError("Training is not yet supported.")
+        action, agent_state, vhat = await agent.get_asv(agent_state, obs)
+        return action, agent_state, vhat
+
+    @asgi_app.post("/init_state")
+    async def init_state(
+        tools: list[Tool],
+        _: Annotated[HTTPAuthorizationCredentials, Depends(validate_token)],
+    ) -> SimpleAgentState:
+        return await agent.init_state(tools)
+
+    return asgi_app
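Putting the two halves together, a sketch of the intended deployment. This assumes aviary[server] is installed, AUTH_TOKEN is exported on the server, and uvicorn as the ASGI runner; none of that tooling is prescribed by the patch itself:

    import uvicorn
    from ldp.agent import (
        HTTPAgentClient,
        SimpleAgent,
        SimpleAgentState,
        make_simple_agent_server,
    )

    # Server side (e.g. a GPU box): expose the real agent over HTTP
    uvicorn.run(make_simple_agent_server(SimpleAgent()), port=8000)

    # Client side: a drop-in Agent whose init_state/get_asv are POST requests
    client = HTTPAgentClient(
        agent_state_type=SimpleAgentState,
        server_url="http://localhost:8000",
        request_headers={"Authorization": "Bearer <AUTH_TOKEN>"},
    )
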
diff --git a/ldp/agent/dqn_agent.py b/ldp/agent/dqn_agent.py
new file mode 100644
index 00000000..b1dfcd7d
--- /dev/null
+++ b/ldp/agent/dqn_agent.py
@@ -0,0 +1,154 @@
+import asyncio
+from typing import Any, cast
+
+from aviary.message import Message
+from aviary.tools import Tool, ToolRequestMessage
+from pydantic import BaseModel, ConfigDict, Field
+
+from ldp.graph.common_ops import FxnOp, LLMCallOp
+from ldp.graph.modules import DQNOp, DQNPolicyModule
+from ldp.graph.op_utils import compute_graph, get_call_id
+from ldp.graph.ops import OpResult
+from ldp.llms import MultipleCompletionLLMModel, prepend_sys
+
+from . import DefaultLLMModelNames
+from .agent import Agent
+from .simple_agent import SimpleAgentState
+
+
+class MultipleCompletionLLMCallOp(LLMCallOp):
+    """Like LLMCallOp, but samples multiple completions (n>1) for a given input."""
+
+    async def forward(  # type: ignore[override]
+        self,
+        config: dict,
+        msgs: list[Message],
+        tools: list[Tool] | None = None,
+        tool_choice: Tool
+        | str
+        | None = MultipleCompletionLLMModel.TOOL_CHOICE_REQUIRED,
+    ) -> list[Message]:
+        model = MultipleCompletionLLMModel(config=config)
+
+        results = await model.call(messages=msgs, tools=tools, tool_choice=tool_choice)
+        if not results:
+            raise ValueError("No completions returned from the model.")
+
+        # All completions have the same config, so check the first one
+        temperature: float = (results[0].config or {}).get("temperature", 1.0)
+
+        # Compute a Monte Carlo estimate of the logprob of this sequence at the given temperature.
+        logprobs = await asyncio.gather(*[
+            self.compute_logprob(
+                raw_log_p=result.logprob,
+                temperature=temperature,
+                model=model,
+                messages=msgs,
+                tools=tools,
+                tool_choice=tool_choice,
+            )
+            for result in results
+        ])
+
+        call_id = get_call_id()
+        self.ctx.update(call_id, "results", results)
+        # This is the logprob of this sequence according to the raw model, without
+        # any temperature/top-p distribution shaping.
+        self.ctx.update(call_id, "raw_logprobs", [result.logprob for result in results])
+
+        self.ctx.update(call_id, "temperature", temperature)
+        self.ctx.update(call_id, "logprob", logprobs)
+
+        return [cast(list[Message], result.messages)[0] for result in results]
+
+    # set the return type to list[Message], not LLMCallOp's inherited Message
+    async def __call__(  # type: ignore[override]
+        self, *args, **kwargs
+    ) -> OpResult[list[Message]]:
+        return await super().__call__(*args, **kwargs)  # type: ignore[return-value]
+
+    async def compute_logprob(
+        self,
+        raw_log_p: float | None,
+        temperature: float,
+        model: MultipleCompletionLLMModel,
+        **model_kwargs,
+    ) -> float | None:
+        """This method computes a Monte Carlo estimate of logprob for a given temperature."""
+        return None  # TODO: finish implementing this using n>1
+
+
+class DQNAgent(BaseModel, Agent[SimpleAgentState]):
+    """An agent that trains a state-action value function Q(s,a) to select actions.
+
+    This is a modification of traditional DQNs [1]. When using a vanilla DQN, the action
+    space is assumed to be enumerable, enabling greedy action selection. When sampling
+    from an LLM, the action space is large. So instead, we sample from the LLM and use
+    the Q network to score the sampled actions. We are then greedy among the sampled
+    actions.
+
+    [1] https://arxiv.org/abs/1312.5602
+    """
+
+    # Not frozen because we want to change num_actions_to_sample
+    model_config = ConfigDict(frozen=False, arbitrary_types_allowed=True)
+
+    num_actions_to_sample: int = Field(
+        default=1,
+        description="Number of actions to sample from the LLM. "
+        "If >1, the Q function will be used to select the "
+        "highest-scoring action.",
+    )
+
+    llm_model: dict[str, Any] = Field(
+        default={"model": DefaultLLMModelNames.OPENAI.value, "temperature": 1.0},
+        description="Model configuration (not trained). "
+        "Setting a high default temperature to encourage exploration.",
+    )
+    sys_prompt: str = Field(
+        default="Using tools, complete the given task.",
+        description="System prompt. Trainable.",
+    )
+
+    def __init__(
+        self,
+        dqn: DQNOp | None = None,
+        epsilon: float = 0.0,
+        actions_only: bool = False,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self._prepend = FxnOp(prepend_sys)
+        self._llm_call = MultipleCompletionLLMCallOp()
+        self._action_splitter = FxnOp[Message](lambda x, i: x[i])
+        self._dqn_policy = DQNPolicyModule[ToolRequestMessage](
+            dqn=dqn, epsilon=epsilon, actions_only=actions_only
+        )
+
+    async def init_state(self, tools: list[Tool]) -> SimpleAgentState:
+        return SimpleAgentState(tools=tools)
+
+    @compute_graph()
+    async def get_asv(
+        self, agent_state: SimpleAgentState, obs: list[Message]
+    ) -> tuple[OpResult[ToolRequestMessage], SimpleAgentState, float]:
+        new_state = agent_state.get_next_state(obs)
+
+        msgs = await self._prepend(new_state.messages, sys_content=self.sys_prompt)
+        sampled_actions: OpResult[list[Message]] = await self._llm_call(
+            # Override config's n. Also make sure caching=False, since we always want
+            # stochastic samples from the LLM for the DQN.
+            self.llm_model | {"n": self.num_actions_to_sample, "caching": False},
+            msgs=msgs,
+            tools=agent_state.tools,
+        )
+        split_actions: list[OpResult[Message]] = await asyncio.gather(*[
+            self._action_splitter(sampled_actions, i)
+            for i in range(self.num_actions_to_sample)
+        ])
+
+        best_q, best_action = await self._dqn_policy(msgs, *split_actions)  # type: ignore[arg-type]
+        new_state.messages = [*new_state.messages, best_action.value]
+
+        return best_action, new_state, best_q
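To make the sample-then-score loop above concrete, a usage sketch (my_tools and observations are hypothetical stand-ins, and the calls must run inside an event loop):

    agent = DQNAgent(num_actions_to_sample=4)
    state = await agent.init_state(tools=my_tools)  # my_tools: list[Tool]
    # Samples 4 candidate actions at temperature 1.0, scores each with the
    # Q network, and returns the (epsilon-)greedy pick plus its Q-value.
    action, state, q_value = await agent.get_asv(state, observations)
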
diff --git a/ldp/agent/memory_agent.py b/ldp/agent/memory_agent.py
new file mode 100644
index 00000000..81a2d944
--- /dev/null
+++ b/ldp/agent/memory_agent.py
@@ -0,0 +1,100 @@
+"""This module defines the MemoryAgent class, which extends a base agent model with memory capabilities.
+
+The MemoryAgent can pick and invoke tools based on the stored and retrieved
+memories, formatted using specified prompts. A memory is typically a set of previous trajectories.
+"""
+
+from typing import cast
+
+from aviary.message import Message
+from aviary.tools import ToolRequestMessage
+from pydantic import ConfigDict, Field
+
+from ldp.graph.common_ops import FxnOp, MemoryOp, PromptOp
+from ldp.graph.memory import Memory, MemoryModel
+from ldp.graph.op_utils import compute_graph
+from ldp.graph.ops import OpResult
+from ldp.llms.prompts import indent_xml
+
+from .simple_agent import SimpleAgent, SimpleAgentState
+
+
+class MemoryAgent(SimpleAgent):
+    """
+    Simple agent that can pick and invoke tools with memory.
+
+    NOTE: the MemoryAgent does not maintain an explicit value estimate,
+    it simply supplies previous trajectories via the prompt.
+    As such, the value estimate vhat will always be zero.
+    """
+
+    prompt: str = Field(
+        default=(
+            "\n\n"
+            "These are relevant memories from previous attempts at similar tasks, "
+            "along with the action taken and the discounted cumulative reward from that action. "
+            "A negative reward is failure, a positive reward is success.\n"
+            "{memories}\n\n"
+            "Considering the memories, choose the next action."
+        ),
+        description="Prompt that includes the memories.",
+    )
+    memory_prompt: str = Field(
+        default="<memory><input>{input}</input><output>{output}</output><value>{value}</value></memory>",
+        description="Prompt for formatting an individual memory. "
+        "Use XML instead of JSON to avoid potential escaping issues.",
+    )
+    num_memories: int = Field(
+        default=MemoryModel.DEFAULT_MEMORY_MATCHES,
+        description="Number of memories to retrieve from MemoryOp",
+    )
+    # Freeze to ensure the only mutation happens in either the agent state (which is
+    # passed around) or in the internal Ops
+    model_config = ConfigDict(frozen=True)
+
+    @staticmethod
+    def _parse_memory(prompt: str, memories: list[Memory]) -> str:
+        return indent_xml(
+            "\n".join([
+                prompt.format(**m.model_dump(exclude={"call_id"})) for m in memories
+            ])
+        )
+
+    @staticmethod
+    def _package_messages(
+        msgs: list[Message], memory_prompt: str, use_memories: bool
+    ) -> list[Message]:
+        if use_memories:
+            return [*msgs, Message(content=memory_prompt)]
+        return msgs
+
+    def __init__(self, memory_model: MemoryModel | None = None, **kwargs):
+        super().__init__(**kwargs)
+        self._memory_op = MemoryOp(memory_model)
+        self._format_memory_op = FxnOp(self._parse_memory)
+        self._prompt_op = PromptOp(self.prompt)
+        self._package_op = FxnOp(self._package_messages)
+
+    @compute_graph()
+    async def get_asv(
+        self, agent_state: SimpleAgentState, obs: list[Message]
+    ) -> tuple[OpResult[ToolRequestMessage], SimpleAgentState, float]:
+        next_state = agent_state.get_next_state(obs)
+
+        query = "\n\n".join([str(m) for m in next_state.messages if m.role != "system"])
+        memories = await self._memory_op(query, matches=self.num_memories)
+        packaged_messages = await self._package_op(
+            next_state.messages,
+            memory_prompt=await self._prompt_op(
+                memories=await self._format_memory_op(self.memory_prompt, memories)
+            ),
+            use_memories=bool(memories.value),
+        )
+        result = cast(
+            OpResult[ToolRequestMessage],
+            await self._llm_call_op(
+                await self._config_op(), msgs=packaged_messages, tools=next_state.tools
+            ),
+        )
+        next_state.messages = [*next_state.messages, result.value]
+        return result, next_state, 0.0
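For intuition, with a memory_prompt along the lines above, each retrieved memory renders as an XML fragment that _parse_memory pretty-prints via indent_xml; roughly (field values hypothetical):

    <memory>
      <input>...query and prior messages...</input>
      <output>...action taken...</output>
      <value>0.75</value>
    </memory>
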
" + "Use XML instead of JSON to avoid potential escaping issues.", + ) + num_memories: int = Field( + default=MemoryModel.DEFAULT_MEMORY_MATCHES, + description="Number of memories to retrieve from MemoryOp", + ) + # Freeze to ensure the only mutation happens in either the agent state (which is + # passed around) or in the internal Ops + model_config = ConfigDict(frozen=True) + + @staticmethod + def _parse_memory(prompt: str, memories: list[Memory]) -> str: + return indent_xml( + "\n".join([ + prompt.format(**m.model_dump(exclude={"call_id"})) for m in memories + ]) + ) + + @staticmethod + def _package_messages( + msgs: list[Message], memory_prompt: str, use_memories: bool + ) -> list[Message]: + if use_memories: + return [*msgs, Message(content=memory_prompt)] + return msgs + + def __init__(self, memory_model: MemoryModel | None = None, **kwargs): + super().__init__(**kwargs) + self._memory_op = MemoryOp(memory_model) + self._format_memory_op = FxnOp(self._parse_memory) + self._prompt_op = PromptOp(self.prompt) + self._package_op = FxnOp(self._package_messages) + + @compute_graph() + async def get_asv( + self, agent_state: SimpleAgentState, obs: list[Message] + ) -> tuple[OpResult[ToolRequestMessage], SimpleAgentState, float]: + next_state = agent_state.get_next_state(obs) + + query = "\n\n".join([str(m) for m in next_state.messages if m.role != "system"]) + memories = await self._memory_op(query, matches=self.num_memories) + packaged_messages = await self._package_op( + next_state.messages, + memory_prompt=await self._prompt_op( + memories=await self._format_memory_op(self.memory_prompt, memories) + ), + use_memories=bool(memories.value), + ) + result = cast( + OpResult[ToolRequestMessage], + await self._llm_call_op( + await self._config_op(), msgs=packaged_messages, tools=next_state.tools + ), + ) + next_state.messages = [*next_state.messages, result.value] + return result, next_state, 0.0 diff --git a/ldp/agent/react_agent.py b/ldp/agent/react_agent.py new file mode 100644 index 00000000..5484a3b3 --- /dev/null +++ b/ldp/agent/react_agent.py @@ -0,0 +1,140 @@ +import logging +from typing import Any, Self, cast + +from aviary.message import Message +from aviary.tools import Tool, ToolRequestMessage, ToolResponseMessage +from pydantic import BaseModel, ConfigDict, Field +from tenacity import ( + Future, + RetryCallState, + before_sleep_log, + retry, + retry_if_exception_type, + stop_after_attempt, +) + +from ldp.graph.modules.react import ( + ACT_DEFAULT_PROMPT_TEMPLATE, + REACT_DEFAULT_PROMPT_TEMPLATE, + MalformedMessageError, + ReActModule, + ToolDescriptionMethods, +) +from ldp.graph.op_utils import compute_graph +from ldp.graph.ops import OpResult + +from . import DefaultLLMModelNames +from .agent import Agent +from .simple_agent import SimpleAgentState + +logger = logging.getLogger(__name__) + + +class ReActAgent(BaseModel, Agent[SimpleAgentState]): + """An Act or ReAct Agent built to work with chat models. + + Paper: https://arxiv.org/abs/2210.03629 + + The ReAct style is like so, and note Act style has no 'Thought: ' entries: + System: + Answer the following questions as best you can. You have access to the following tools: + + {tools} + + Use the following format: + + Thought: you should always think about what to do + Action: the action to take, should be one of [{tool_names}] + Action Input: the input to the action + Observation: the result of the action + ... 
(this Thought/Action/Action Input/Observation can repeat N times) + User: + {questions} + Assistant: + Thought: + Action: + Action Input: + User: + Observation: + Assistant: + Thought: + Action: + Action Input: + ... + + One notable design decision is that ReAct's state does not necessarily + track ToolRequestMessage. Recall that aviary is in a partially observable + domain, meaning we don't need to have perfect symmetry with Environments. + Instead, ReActAgent's state stores a ReAct-style message history, where the + messages are plain Message (and not a ToolRequestMessage). + """ + + # Freeze to ensure the only mutation happens in either the agent state (which is + # passed around) or in the internal Ops + model_config = ConfigDict(frozen=True) + + llm_model: dict[str, Any] = Field( + default={ + "model": DefaultLLMModelNames.OPENAI.value, + "temperature": 0.1, + "logprobs": True, + "top_logprobs": 1, + }, + description="Starting configuration for the LLM model.", + ) + sys_prompt: str = Field( + default=REACT_DEFAULT_PROMPT_TEMPLATE, + description="Learnable system prompt template, defaults to ReAct.", + ) + tool_description_method: ToolDescriptionMethods = Field( + default=ToolDescriptionMethods.STR, + description="Method used to describe the tools, defaults to 'str' description.", + ) + + @classmethod + def make_act_agent(cls, **kwargs) -> Self: + return cls(sys_prompt=ACT_DEFAULT_PROMPT_TEMPLATE, **kwargs) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._react_module = ReActModule( + self.llm_model, self.sys_prompt, self.tool_description_method + ) + + async def init_state(self, tools: list[Tool]) -> SimpleAgentState: + return SimpleAgentState(tools=tools) + + @staticmethod + def after_retry_failure_log(retry_state: RetryCallState): + logger.error( + f"Failed across {retry_state.attempt_number} attempts to run get_asv given" + f" arguments {retry_state.args} and kwargs {retry_state.kwargs}." + ) + # NOTE: this blows up with the underlying exception... it isn't wrapped in a + # RetryError like normal tenacity + return cast(Future, retry_state.outcome).result() + + @retry( + retry=retry_if_exception_type(MalformedMessageError), + before_sleep=before_sleep_log(logger, logging.WARNING), + stop=stop_after_attempt(5), + retry_error_callback=after_retry_failure_log, + ) + @compute_graph() + async def get_asv( + self, agent_state: SimpleAgentState, obs: list[Message] + ) -> tuple[OpResult[ToolRequestMessage], SimpleAgentState, float]: + next_state = agent_state.get_next_state( + obs=[ + Message(content=f"Observation: {m.content}") + if isinstance(m, ToolResponseMessage) + else m + for m in obs + ] + ) + + final_result, react_message = await self._react_module( + messages=next_state.messages, tools=next_state.tools + ) + next_state.messages = [*next_state.messages, react_message] + return final_result, next_state, 0.0 diff --git a/ldp/agent/simple_agent.py b/ldp/agent/simple_agent.py new file mode 100644 index 00000000..961cf39d --- /dev/null +++ b/ldp/agent/simple_agent.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from typing import Any, Self, cast + +from aviary.message import Message +from aviary.tools import Tool, ToolRequestMessage, ToolResponseMessage +from pydantic import BaseModel, ConfigDict, Field + +from ldp.graph.common_ops import ConfigOp, LLMCallOp +from ldp.graph.op_utils import compute_graph +from ldp.graph.ops import OpResult +from ldp.llms import prepend_sys + +from . 
import DefaultLLMModelNames +from .agent import Agent + + +class SimpleAgentState(BaseModel): + """Simple bucket for an Agent to access tools and store messages.""" + + tools: list[Tool] = Field(default_factory=list) + messages: list[ToolRequestMessage | ToolResponseMessage | Message] = Field( + default_factory=list + ) + + def get_next_state( + self, + obs: list[Message] | None = None, + tools: list[Tool] | None = None, + **kwargs, + ) -> Self: + """ + Get the next agent state without mutating the optional prior state. + + Do not mutate self here, just read from it. + + Args: + obs: Optional observation messages to use in creating the next state. + tools: Optional list of tools available to the agent. If unspecified, these + should be pulled from the prior_state. + kwargs: Additional keyword arguments to pass to this class's constructor. + + Returns: + The next agent state (which is not an in-place change to self). + """ + return type(self)( + tools=tools if tools is not None else self.tools, + messages=self.messages + (obs or []), + **kwargs, + ) + + +class SimpleAgent(BaseModel, Agent[SimpleAgentState]): + """Simple agent that can pick and invoke tools with a language model. + + It does not have a system prompt because it's meant to be lightweight. + """ + + # Freeze to ensure the only mutation happens in either the agent state (which is + # passed around) or in the internal Ops + model_config = ConfigDict(frozen=True) + + llm_model: dict[str, Any] = Field( + default={"model": DefaultLLMModelNames.OPENAI.value, "temperature": 0.1}, + description="Starting configuration for the LLM model. Trainable.", + ) + sys_prompt: str | None = Field( + default=None, + description=( + "Opt-in system prompt. If one is passed, the system prompt is not set up to" + " be trainable, because this class is meant to be quite simple as far as" + " possible hyperparameters." + ), + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._config_op = ConfigOp[dict](config=self.llm_model) + self._llm_call_op = LLMCallOp() + + async def init_state(self, tools: list[Tool]) -> SimpleAgentState: + return SimpleAgentState(tools=tools) + + @compute_graph() + async def get_asv( + self, agent_state: SimpleAgentState, obs: list[Message] + ) -> tuple[OpResult[ToolRequestMessage], SimpleAgentState, float]: + next_state = agent_state.get_next_state(obs) + + messages = ( + prepend_sys(next_state.messages, sys_content=self.sys_prompt) + if self.sys_prompt is not None + else next_state.messages + ) + result = cast( + OpResult[ToolRequestMessage], + await self._llm_call_op( + await self._config_op(), msgs=messages, tools=next_state.tools + ), + ) + next_state.messages = [*next_state.messages, result.value] + return result, next_state, 0.0 diff --git a/ldp/agent/tree_of_thoughts_agent.py b/ldp/agent/tree_of_thoughts_agent.py new file mode 100644 index 00000000..d6c8d77a --- /dev/null +++ b/ldp/agent/tree_of_thoughts_agent.py @@ -0,0 +1,142 @@ +"""Module for the Tree of Thoughts agent. + +This module defines the Tree of Thoughts agent which uses a language model to generate and evaluate possible +steps in a puzzle or problem-solving environment. The agent employs a tree search mechanism to explore different +solutions and selects the most promising ones based on evaluations. + +This module is based on the following paper: https://openreview.net/forum?id=5Xc1ecxO1h + +Note: TreeofThoughtsAgent is currently tested as a baseline agent for Game of 24. 
It does not yet support tool calls +that operate on the intermediate reasoning steps. This would probably entail a redefinition of the POMDP to +undertake intermediate reasoning steps as environment steps. +""" + +import logging +from collections.abc import Callable +from typing import Any + +from aviary.message import Message +from aviary.tools import Tool, ToolCall, ToolRequestMessage +from pydantic import BaseModel, ConfigDict, Field + +from ldp.graph.common_ops import FxnOp, LLMCallOp +from ldp.graph.op_utils import compute_graph, get_call_id, op_call +from ldp.graph.ops import OpResult +from ldp.llms import prepend_sys + +from . import DefaultLLMModelNames +from .agent import Agent +from .simple_agent import SimpleAgentState + +logger = logging.getLogger(__name__) + + +class TreeofThoughtsAgent(BaseModel, Agent[SimpleAgentState]): + """Tree of Thoughts Agent. + + This agent uses a tree search mechanism combined with an LLM to generate and evaluate + possible steps in a problem-solving environment. It is designed to explore different solutions + and select the most promising ones based on a heuristic evaluation function. + """ + + # Freeze to ensure the only mutation happens in either the agent state (which is + # passed around) or in the internal Ops + model_config = ConfigDict(frozen=True) + + llm_model: dict[str, Any] = Field( + default={"model": DefaultLLMModelNames.OPENAI.value, "temperature": 0.1}, + description="Starting configuration for the LLM model.", + ) + value_prompt_func: Callable[[str, str], str] = Field( + default=lambda x, y: f"Value prompt for input: {x}, current path: {y}", + description="Function to format value prompt template.", + ) + proposal_prompt_func: Callable[[str, str], str] = Field( + default=lambda x, y: f"Proposal prompt for input: {x}, current path: {y}", + description="Function to format proposal prompt template.", + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._prepend_op = FxnOp(prepend_sys) + self._llm_call_op = LLMCallOp() + + async def init_state(self, tools: list[Tool]) -> SimpleAgentState: + return SimpleAgentState(tools=tools) + + @compute_graph() + async def get_asv( # type: ignore[override] + self, + agent_state: SimpleAgentState, + obs: list[Message], + eval_function: Callable[[str, list[str]], float], + n_steps: int = 0, + n_select_samples: int = 0, + ) -> tuple[OpResult[ToolRequestMessage], SimpleAgentState, float]: + """Generate and evaluate possible steps in the problem-solving process. + + Args: + agent_state: The current state of the agent. + obs: The observations provided to the agent. + eval_function: Function to evaluate the generated paths in the tree. + n_steps: Number of steps to generate. Defaults to 0. Dictated by the environment. + n_select_samples: Number of tree nodes to select to explore in each step. Defaults to 0. + + Returns: + The result of the operation, the new state of the agent, and the number representing the value (0). 
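+
+        Example (an illustrative sketch, not a test: the lambda eval_function
+        and the "4 6 8 2" input are hypothetical, in the style of Game of 24):
+            agent = TreeofThoughtsAgent()
+            state = await agent.init_state(tools=[])
+            result, state, _ = await agent.get_asv(
+                state,
+                [Message(content="4 6 8 2")],
+                eval_function=lambda path, outputs: float("sure" in outputs[0]),
+                n_steps=3,
+                n_select_samples=2,
+            )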
+ """ + new_state = agent_state.get_next_state() + + x = str(obs[0].content) # Current problem input + current_paths = [""] # current candidate paths through the tree + + for step in range(n_steps): + logger.info(f"Step {step}") + + # propose candidate paths + candidate_paths = [] + for path in current_paths: + proposal_prompt_init = self.proposal_prompt_func(x, path) + proposal_msgs = await self._prepend_op( + new_state.messages, sys_content=proposal_prompt_init + ) + proposal = await self._llm_call_op(self.llm_model, msgs=proposal_msgs) + # Append candidate paths to the current paths + candidate_paths += [ + path + _ + "\n" + for _ in (proposal.value.content or "").split("\n") + if _ + ] + + # score candidate paths + values = [] + for path in candidate_paths: + value_prompt_init = self.value_prompt_func(x, path) + value_msgs = await self._prepend_op( + new_state.messages, sys_content=value_prompt_init + ) + value_outputs = await self._llm_call_op(self.llm_model, msgs=value_msgs) + values.append(eval_function(path, [value_outputs.value.content or ""])) + + # greedy selection + values_with_index = [(v, i) for i, v in enumerate(values)] + sorted_values = sorted(values_with_index, key=lambda x: x[0], reverse=True) + select_ids = [i for _, i in sorted_values[:n_select_samples]] + select_new_paths = [candidate_paths[select_id] for select_id in select_ids] + current_paths = select_new_paths + + # Generate tool calls for the selected answer + tool_calls = [ + ToolCall.from_tool(tool, *[current_paths[0]]) for tool in new_state.tools + ] + result = ToolRequestMessage(content=current_paths[0], tool_calls=tool_calls) + + new_state.messages = [*new_state.messages, result] + async with op_call(): + op_result: OpResult[ToolRequestMessage] = OpResult( + call_id=get_call_id(), + op_name="TreeofThoughtsAgentOp", + op_class_name=type(self).__name__, + value=result, + ) + return op_result, new_state, 0.0 diff --git a/ldp/alg/__init__.py b/ldp/alg/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldp/alg/algorithms.py b/ldp/alg/algorithms.py new file mode 100644 index 00000000..567ded74 --- /dev/null +++ b/ldp/alg/algorithms.py @@ -0,0 +1,167 @@ +import itertools +from collections.abc import Sequence +from typing import Any + +import networkx as nx +from aviary.message import Message, join +from aviary.tools import Tool, ToolRequestMessage + +from ldp.graph.ops import GradOutType, OpResult + + +def discounted_returns( + rewards: list[float], terminated: list[bool], discount: float = 1.0 +) -> list[float]: + r""" + Calculate the discounted returns for a list of rewards, considering termination flags and a discount factor. + + The discounted return represents the future discounted rewards from each time step onwards, taking into account + whether an episode has terminated at each step. + + The discounted return \( G_t \) is given by: + + .. math:: + G_t = \sum_{k=1}^{\infty} \gamma^{k-1} R_{t+k} + + where: + - \( G_t \) is the discounted return starting from time step \( t \). + - \( \gamma \) is the discount factor. + - \( R_{t+k} \) is the reward received at time step \( t+k \). + + + Args: + rewards: A list of rewards at each time step. + terminated: A list of boolean flags indicating whether the episode terminated at each time step. + discount: Discount factor to apply to future rewards. Defaults to 1.0 which means no discounting is applied. + + Returns: + A list of discounted returns (rewards to go), with each element representing the + total discounted reward from that step onwards. 
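+
+    Note:
+        The implementation evaluates this sum via the backward recurrence
+        \( G_t = R_{t+1} + \gamma (1 - d_t) G_{t+1} \), where \( d_t \) is the
+        termination flag at step \( t \).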
+ + Example: + >>> rewards = [1.0, 2.0, 3.0] + >>> terminated = [False, False, True] + >>> discounted_returns(rewards, terminated, discount=0.9) + [5.23, 4.7, 3.0] + """ + returns = [] + r = 0.0 + for reward, term in zip(reversed(rewards), reversed(terminated), strict=False): + # 1 - term is 0 if the episode has terminated + r = reward + discount * r * (1 - term) + returns.append(r) + returns.reverse() + return returns + + +def to_network( # noqa: C901 + op_result: OpResult, + max_label_height: int | None = None, + max_label_width: int | None = None, + G: "nx.MultiDiGraph | None" = None, +) -> "nx.MultiDiGraph": + """ + Populate a NetworkX graph from the input op result's computation graph. + + How to export Graphviz .dot file: nx.drawing.nx_pydot.write_dot(G, "file.dot") + How to render with Graphviz: nx.drawing.nx_pydot.to_pydot(G).write_png("file.png") + Online Graphviz renderer: https://dreampuf.github.io/GraphvizOnline/ + + Args: + op_result: Starting op result to recurse parent op calls and results. + max_label_height: Optional max label height (lines). + max_label_width: Optional max label width (chars). + G: Optional graph to add nodes/edges to. Allows this to be a recursive function. + + Returns: + Populated a NetworkX multi-edge directed graph. + """ + + def gvizify(x: Any) -> str: + """Stringify and then escape colons for Graphviz labels.""" + if isinstance(x, OpResult): + x = x.value + if isinstance(x, Sequence): + if isinstance(x[0], Message): + x = join(x) + elif isinstance(x[0], Tool): + x = "\n".join(f"Tool {t.info.name}" for t in x) + elif isinstance(x, ToolRequestMessage): + # reformatting tool calls to make them easier to read + x = str(x).split(" for tool calls: ") + x = "\n".join(x).replace("; ", "\n") + result = ( + "\n".join( + # Replace double quotes since they can interfere with colon escapes + # Strip here to avoid trailing spaces in the labels + x_line[:max_label_width].replace('"', "'").strip() + for i, x_line in enumerate(str(x).split("\n")) + if not max_label_height or i < max_label_height + ) + ).strip() # Remove trailing newlines + return result if ":" not in result else f'"{result}"' # Escape colons + + call_id = op_result.call_id + assert ( + call_id is not None + ), "to_network currently assumes a compute graph is available" + ctx = op_result.ctx + + op_result_str = gvizify(op_result) + op_result_node = gvizify(f"{op_result_str}\n{call_id.fwd_id}") + if G is None: + # TODO: figure out a way to use OpResult.get_compute_graph(), which builds + # a nx.DiGraph. + G = nx.MultiDiGraph() + + op_call_str = gvizify(f"{ctx.op_name}:{call_id.fwd_id}") + if op_call_str in G: + # We have already visited this node - can skip. 
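+        # Recursion over parent OpResults can reach the same op call more than
+        # once (e.g. when one call feeds multiple downstream inputs), so
+        # returning early avoids re-adding its nodes and edges.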
+ return G + + G.add_node(op_result_node, style="dotted", label=op_result_str) + G.add_edge(op_call_str, op_result_node) + + if ( + result_grad := ctx.get(key="grad_output", call_id=call_id, default=None) + ) is not None: + G.add_edge( + op_result_node, + op_call_str, + label=gvizify(result_grad), + style="dotted", + ) + + input_args, input_kwargs = op_result.inputs + grads = ctx.get(key="grad_input", call_id=call_id, default=None) + if grads is None: + arg_grads: list[GradOutType | None] = [None] * len(input_args) + kwarg_grads: dict[str, GradOutType | None] = dict.fromkeys(input_kwargs) + else: + arg_grads, kwarg_grads = grads + + args_and_grads = itertools.chain( + zip(input_args, arg_grads, strict=True), + ((arg, kwarg_grads[key]) for key, arg in input_kwargs.items()), + ) + + for arg, grad in args_and_grads: + arg_str = gvizify(arg) + + if not isinstance(arg, OpResult): + G.add_node(arg_str, style="dotted") + + else: + arg_str = gvizify(f"{arg_str}\n{arg.call_id.fwd_id}") + G = to_network( + arg, + max_label_height=max_label_height, + max_label_width=max_label_width, + G=G, + ) + + G.add_edge(arg_str, op_call_str) + if grad is not None: + G.add_edge(op_call_str, arg_str, label=gvizify(grad), style="dotted") + + return G diff --git a/ldp/alg/beam_search.py b/ldp/alg/beam_search.py new file mode 100644 index 00000000..6dc0a4b2 --- /dev/null +++ b/ldp/alg/beam_search.py @@ -0,0 +1,218 @@ +import asyncio +import uuid +from collections.abc import Awaitable, Callable, Sequence +from contextlib import suppress +from typing import NamedTuple + +from aviary.env import Environment + +from ldp.agent.agent import Agent, TAgentState +from ldp.alg.callbacks import Callback +from ldp.alg.rollout import AgentError, EnvError, TEnv, reraise_exc_as +from ldp.data_structures import Trajectory, Transition + + +class Beam(NamedTuple): + # An ongoing beam contains two things: the trajectory up to now + # and the environment that the last action was sampled from. We + # need both to continue sampling the next step. 
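+    # E.g. expanding a seed beam with traj_id "abc123:0" (uuid hex shortened
+    # here for illustration) and samples_per_beam=2 yields children
+    # "abc123:0:0" and "abc123:0:1", each paired with its own cloned env.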
+ traj: Trajectory + env: Environment + + +class BeamSearchRollout: + def __init__( + self, + agent: Agent, + beam_width: int, + samples_per_beam: int, + env_clone_fn: Callable[[TEnv], Awaitable[TEnv]], + agent_clone_fn: Callable[[TAgentState], TAgentState], + scoring_fn: Callable[[Trajectory], Awaitable[float]], + replay_actions_on_clone: bool = False, + callbacks: Sequence[Callback] | None = None, + catch_agent_failures: bool = True, + catch_env_failures: bool = True, + verbose: bool = False, + ): + self.agent = agent + + self.catch_agent_failures = catch_agent_failures + self.catch_env_failures = catch_env_failures + + self.verbose = verbose + + self.traj_buffer: dict[str, Trajectory] = {} + self.search_buffer: dict[str, list[Trajectory]] = {} + + self.beam_width = beam_width + self.samples_per_beam = samples_per_beam + + self.env_clone_fn = env_clone_fn + self.agent_clone_fn = agent_clone_fn + self.scoring_fn = scoring_fn + self.replay_actions_on_clone = replay_actions_on_clone + + self.callbacks = callbacks or [] + + async def sample_trajectories( + self, + environments: Sequence[Environment], + max_steps: int | None = None, + ) -> list[Trajectory]: + self.traj_buffer.clear() + traj_ids = [uuid.uuid4().hex for _ in environments] + + tasks = [ + self._rollout(traj_id, env, max_steps) + for traj_id, env in zip(traj_ids, environments, strict=True) + ] + await asyncio.gather(*tasks) + + return [self.traj_buffer[traj_id] for traj_id in traj_ids] + + async def _rollout( + self, + traj_id: str, + env: Environment, + max_steps: int | None, + ): + with suppress(AgentError, EnvError): + # for samples_per_beam==1. we want to ensemble and pick the highest-scoring one + n_seeds = 1 if self.samples_per_beam > 1 else self.beam_width + + done_beams: list[Beam] = [] + beams = [ + Beam(traj=Trajectory(traj_id=f"{traj_id}:{i}"), env=env) + for i in range(n_seeds) + ] + # will be replaced if rollout is successful + self.traj_buffer[traj_id] = beams[0].traj + self.search_buffer[traj_id] = [] + + with reraise_exc_as(EnvError, self.catch_env_failures): + init_obs, tools = await env.reset() + + with reraise_exc_as(AgentError, self.catch_agent_failures): + seed_agent_states = await asyncio.gather( + *(self.agent.init_state(tools) for _ in range(n_seeds)) + ) + + while len(done_beams) < self.beam_width and beams: + new_beams = [] + for beam, seed_agent_state in zip( + beams, seed_agent_states, strict=True + ): + for i_sample in range(self.samples_per_beam): + new_env = await self._clone_env(beam) + if new_env is None: + continue + + agent_state = self.agent_clone_fn(seed_agent_state) + obs = ( + beam.traj.steps[-1].next_observation + if beam.traj.steps + else init_obs.copy() + ) + + await asyncio.gather(*[ + callback.before_transition( + traj_id, self.agent, env, agent_state, obs + ) + for callback in self.callbacks + ]) + + with reraise_exc_as(AgentError, self.catch_agent_failures): + ( + action, + next_agent_state, + vhat, + ) = await self.agent.get_asv(agent_state, obs) + await asyncio.gather(*[ + callback.after_agent_get_asv( + traj_id, action, next_agent_state, vhat + ) + for callback in self.callbacks + ]) + + with reraise_exc_as(EnvError, self.catch_env_failures): + next_obs, reward, done, trunc = await new_env.step( + action.value + ) + await asyncio.gather(*[ + callback.after_env_step( + traj_id, next_obs, reward, done, trunc + ) + for callback in self.callbacks + ]) + + step = Transition( + timestep=len(beam.traj.steps), + agent_state=agent_state, + next_agent_state=next_agent_state, + 
observation=obs, + next_observation=next_obs, + action=action, + reward=reward, + done=done, + truncated=trunc, + value=0.0, # will be filled in + ) + await asyncio.gather(*[ + callback.after_transition(traj_id, self.agent, env, step) + for callback in self.callbacks + ]) + + new_beam = Beam( + traj=Trajectory( + traj_id=beam.traj.traj_id + f":{i_sample}", # type: ignore[operator] + steps=[*beam.traj.steps, step], + ), + env=new_env, + ) + step.value = await self.scoring_fn(new_beam.traj) + self.search_buffer[traj_id].append(new_beam.traj) + + if ( + not new_beam.traj.done + and max_steps is not None + and len(new_beam.traj.steps) >= max_steps + ): + last_step = new_beam.traj.steps[-1] + last_step.done = last_step.truncated = True + + if new_beam.traj.done: + done_beams.append(new_beam) + else: + new_beams.append(new_beam) + + new_beams.sort(key=lambda b: b.traj.steps[-1].value, reverse=True) + beams, discarded = ( + new_beams[: self.beam_width], + new_beams[self.beam_width :], + ) + seed_agent_states = [b.traj.steps[-1].next_agent_state for b in beams] + await asyncio.gather(*[d.env.close() for d in discarded]) + + await asyncio.gather(*[b.env.close() for b in beams]) + + self.traj_buffer[traj_id] = max( + done_beams, + key=lambda b: (b.traj.steps[-1].truncated, b.traj.steps[-1].value), + ).traj + + async def _clone_env(self, beam: Beam) -> Environment | None: + try: + with reraise_exc_as(EnvError, self.catch_env_failures): + # I'm not sure how to type hint this + env = await self.env_clone_fn(beam.env) # type: ignore[arg-type] + if self.replay_actions_on_clone: + # Some envs can't be cloned, so instead replay. + # We rely on env_clone_fn to properly reset the env if needed. + # We assume a deterministic env, so the return values are discarded. + for step in beam.traj.steps: + if step.action is not None: + _ = await env.step(step.action.value) + return env + except EnvError: + return None diff --git a/ldp/alg/callbacks.py b/ldp/alg/callbacks.py new file mode 100644 index 00000000..4e473f81 --- /dev/null +++ b/ldp/alg/callbacks.py @@ -0,0 +1,377 @@ +import json +import logging +import os +import time +from collections import defaultdict +from collections.abc import Callable, Sequence +from functools import partial +from pathlib import Path +from typing import Any + +import aiofiles +from aviary.env import Environment, TaskDataset +from aviary.message import Message +from aviary.tools import MessagesAdapter, ToolRequestMessage + +from ldp.agent import Agent +from ldp.data_structures import Trajectory, Transition +from ldp.graph.ops import OpResult + +try: + import wandb +except ImportError: + wandb = None # type: ignore[assignment] + +logger = logging.getLogger(__name__) + + +class Callback: + """Base class for callbacks used by RolloutManager/Evaluator/OnlineTrainer. 
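+
+    All hooks default to no-ops, so subclasses only need to override the
+    methods they care about.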
+ + Pseudocode to demonstrate how callback methods are invoked (marked as *): + + RolloutManager.sample_trajectories(): + while not done: + callback.before_transition() * + agent.get_asv() + callback.after_agent_get_asv() * + env.step() + callback.after_env_step() * + callback.after_transition() * + + Evaluator.evaluate / OnlineTrainer._eval_loop(): + callback.before_eval_loop() * + for batch in eval_dataset: + rollout_manager.sample_trajectories() + callback.after_eval_step() * + callback.after_eval_loop() * + + OfflineTrainer / OnlineTrainer.train(): + for batch in train_dataset: + rollout_manager.sample_trajectories() # if online + optimizer.aggregate() + if updating_optimizer: + optimizer.update() + callback.after_update() * + callback.after_train_step() * + """ + + async def before_transition( + self, + traj_id: str, + agent: Agent, + env: Environment, + agent_state: Any, + obs: list[Message], + ) -> None: + """Invoked by RolloutManager before each transition.""" + + async def after_agent_get_asv( + self, + traj_id: str, + action: OpResult[ToolRequestMessage], + next_agent_state: Any, + value: float, + ): + """Invoked by RolloutManager after agent.get_asv().""" + + async def after_env_step( + self, traj_id: str, obs: list[Message], reward: float, done: bool, trunc: bool + ): + """Invoked by RolloutManager after env.step().""" + + async def after_transition( + self, traj_id: str, agent: Agent, env: Environment, transition: Transition + ) -> None: + """Invoked by RolloutManager after each transition.""" + + async def after_train_step(self, trajectories: Sequence[Trajectory]) -> None: + """Invoked by OnlineTrainer after each training step.""" + + async def before_eval_loop(self) -> None: + """Invoked by Evaluator and OnlineTrainer before the evaluation loop.""" + + async def after_eval_step(self, trajectories: Sequence[Trajectory]) -> None: + """Invoked by Evaluator and OnlineTrainer after each evaluation step.""" + + async def after_eval_loop(self) -> None: + """Invoked by Evaluator and OnlineTrainer after the evaluation loop.""" + + async def after_update(self) -> None: + """Invoked by OnlineTrainer after each optimizer.update() call.""" + + +class TrajectoryFileCallback(Callback): + """Callback that writes trajectories to a file.""" + + def __init__(self, output_dir: os.PathLike | str): + self.output_dir = Path(output_dir) + self.output_dir.mkdir(parents=True, exist_ok=True) + + self.out_files: dict[str, Path] = {} + self.trajs: dict[str, Trajectory] = defaultdict(Trajectory) + + def _make_filename(self, traj_id: str, env: Environment) -> str: + """Create the filename for the output file.""" + return f"{traj_id}.jsonl" + + async def before_transition( + self, + traj_id: str, + agent: Agent, + env: Environment, + agent_state: Any, + obs: list[Message], + ) -> None: + if traj_id not in self.out_files: + self.out_files[traj_id] = self.output_dir / self._make_filename( + traj_id, env + ) + + async def after_transition( + self, traj_id: str, agent: Agent, env: Environment, transition: Transition + ) -> None: + assert traj_id in self.out_files + traj = self.trajs[traj_id] + traj.steps.append(transition) + # TODO: make this async? 
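+        # Serializing the accumulated trajectory after every transition keeps
+        # the on-disk file current even if the rollout dies mid-episode.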
+ traj.to_jsonl(self.out_files[traj_id]) + + def cleanup(self) -> None: + for out_file in self.out_files.values(): + if out_file.exists(): + out_file.unlink() + + +class RolloutDebugDumpCallback(Callback): + """Writes rollout debug info to an output directory.""" + + def __init__(self, output_dir: os.PathLike | str): + self.output_dir = Path(output_dir) + self.output_dir.mkdir(parents=True, exist_ok=True) + + self.out_files: dict[str, Path] = {} + + def _get_out_file(self, traj_id: str) -> Path: + if traj_id not in self.out_files: + self.out_files[traj_id] = self.output_dir / f"{traj_id}.jsonl" + return self.out_files[traj_id] + + async def before_transition( + self, + traj_id: str, + agent: Agent, + env: Environment, + agent_state, + obs: list[Message], + ) -> None: + self.start = time.time() + + def _get_time_elapsed(self) -> float: + elapsed = time.time() - self.start + self.start = time.time() + return elapsed + + async def after_agent_get_asv( + self, + traj_id: str, + action: OpResult[ToolRequestMessage], + next_agent_state: Any, + value: float, + ) -> None: + log = { + "event": "AGENT_GET_ASV", + "elapsed": self._get_time_elapsed(), + "action": action.value.model_dump(), + "value": value, + } + async with aiofiles.open(self._get_out_file(traj_id), "a") as f: + await f.write(json.dumps(log) + "\n") + + async def after_env_step( + self, + traj_id: str, + obs: list[Message], + reward: float, + done: bool, + trunc: bool, + ): + log = { + "event": "ENV_STEP", + "elapsed": self._get_time_elapsed(), + "obs": MessagesAdapter.dump_python(obs), + "reward": reward, + "done": done, + "truncated": trunc, + } + async with aiofiles.open(self._get_out_file(traj_id), "a") as f: + await f.write(json.dumps(log) + "\n") + + +class ComputeTrajectoryMetricsMixin: + """Mixin for TaskDataset classes to enable them to compute metrics.""" + + def compute_trajectory_metrics( + self, + trajectories: Sequence[Trajectory], + ) -> dict[str, list[float]]: + return { + "reward": [ + sum(step.reward for step in traj.steps) for traj in trajectories + ], + "truncation_rate": [ + sum(step.truncated for step in traj.steps) for traj in trajectories + ], + "avg_value": [ + sum(step.value for step in traj.steps) / len(traj.steps) + for traj in trajectories + ], + "num_steps": [len(traj.steps) for traj in trajectories], + } + + +class TrajectoryMetricsCallback(Callback): + """ + Compute metrics that are defined by task datasets. + + NOTE: evaluation portion's after_eval_step/loop() is not concurrency safe because + trajectories should be stored in the order of after_eval_step() calls. + """ + + def __init__( + self, + train_dataset: TaskDataset | None = None, + eval_dataset: TaskDataset | None = None, + train_metrics_transform: Callable[[dict[str, list[float]]], Any] = lambda x: x, + eval_metrics_transform: Callable[ + [list[dict[str, list[float]]]], Any + ] = lambda x: x, + ): + for ds in (train_dataset, eval_dataset): + if ds and not isinstance(ds, ComputeTrajectoryMetricsMixin): + raise ValueError( + f"Dataset {ds} didn't implement" + f" {ComputeTrajectoryMetricsMixin.__name__}, which is required for" + " this callback." 
+ ) + self._train_metrics_fn = ( + train_dataset.compute_trajectory_metrics if train_dataset else None # type: ignore[attr-defined] + ) + self._eval_metrics_fn = ( + eval_dataset.compute_trajectory_metrics if eval_dataset else None # type: ignore[attr-defined] + ) + self._train_metrics_transform = train_metrics_transform + self._eval_metrics_transform = eval_metrics_transform + self._eval_trajectories: list[Sequence[Trajectory]] = [] + + async def after_train_step(self, trajectories: Sequence[Trajectory]) -> None: + if not self._train_metrics_fn: + return + self._train_metrics_transform(self._train_metrics_fn(trajectories)) + + async def after_eval_step(self, trajectories: Sequence[Trajectory]) -> None: + if not self._eval_metrics_fn: + return + self._eval_trajectories.append(trajectories) + + async def after_eval_loop(self) -> None: + if not self._eval_metrics_fn: + return + self._eval_metrics_transform([ + self._eval_metrics_fn(ts) for ts in self._eval_trajectories + ]) + self._eval_trajectories.clear() + + +class MeanMetricsCallback(TrajectoryMetricsCallback): + """Take a mean of all metrics.""" + + def __init__( + self, + train_dataset: TaskDataset | None = None, + eval_dataset: TaskDataset | None = None, + ): + super().__init__( + train_dataset, + eval_dataset, + train_metrics_transform=partial(self._compute_means, "_train_means"), + eval_metrics_transform=partial(self._compute_means, "_eval_means"), + ) + self._train_means: dict[str, float] | None = None + self._eval_means: dict[str, float] | None = None + + def _compute_means( + self, attr: str, metrics: dict[str, list[float]] | list[dict[str, list[float]]] + ) -> None: + if isinstance(metrics, list): # We need to flatten + buckets: dict[str, list[float]] = defaultdict(list) + for m in metrics: + for k, v in m.items(): + buckets[k].extend(v) + else: + buckets = metrics + setattr(self, attr, {k: sum(v) / len(v) for k, v in buckets.items()}) + + @property + def train_means(self) -> dict[str, float]: + if self._train_means is None: + raise RuntimeError( + "Training means are only available after this callback is invoked." + ) + return self._train_means + + @property + def eval_means(self) -> dict[str, float]: + if self._eval_means is None: + raise RuntimeError( + "Evaluation means are only available after this callback is invoked." + ) + return self._eval_means + + +class WandBLoggingCallback(TrajectoryMetricsCallback): + def __init__( + self, + train_dataset: TaskDataset | None = None, + eval_dataset: TaskDataset | None = None, + ): + if wandb is None: + raise ImportError( + f"{type(self).__name__} processing requires the 'monitor' extra for" + " 'wandb'. Please: `pip install aviary-internal[monitor]`." + ) + super().__init__( + train_dataset, + eval_dataset, + train_metrics_transform=self._train_log, + eval_metrics_transform=self._eval_log, + ) + + self._num_train_step = 0 + + async def after_train_step(self, trajectories: Sequence[Trajectory]) -> None: + self._num_train_step += 1 + return await super().after_train_step(trajectories) + + def _train_log(self, metrics: dict[str, list[float]]) -> None: + # Each wandb.log() increments the wandb step by 1. Log the training step here + # so we can use it as an x-axis for training metrics that are logged by different + # wandb.log() calls. 
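+        # (In the W&B UI, "train/step" can then be selected as the x-axis for
+        # the train/* panels.)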
+ wandb.log( + { + f"train/{key}_mean": sum(vals) / len(vals) + for key, vals in metrics.items() + } + | {"train/step": self._num_train_step} + ) + + @staticmethod + def _eval_log(metrics: list[dict[str, list[float]]]) -> None: + flattened_metrics = defaultdict(list) + for m in metrics: + for k, v in m.items(): + flattened_metrics[k].extend(v) + wandb.log({ + f"eval/{key}_mean": sum(vals) / len(vals) + for key, vals in flattened_metrics.items() + }) diff --git a/ldp/alg/datasets.py b/ldp/alg/datasets.py new file mode 100644 index 00000000..687587fc --- /dev/null +++ b/ldp/alg/datasets.py @@ -0,0 +1,11 @@ +from aviary.env import TASK_DATASET_REGISTRY +from aviary.env import DummyTaskDataset as _DummyTaskDataset + +from ldp.alg.callbacks import ComputeTrajectoryMetricsMixin + + +class DummyTaskDataset(_DummyTaskDataset, ComputeTrajectoryMetricsMixin): + pass + + +TASK_DATASET_REGISTRY["dummy"] = "ldp.alg.datasets", "DummyTaskDataset" diff --git a/ldp/alg/optimizer/__init__.py b/ldp/alg/optimizer/__init__.py new file mode 100644 index 00000000..002ad152 --- /dev/null +++ b/ldp/alg/optimizer/__init__.py @@ -0,0 +1,96 @@ +from typing import Any, cast + +from pydantic import BaseModel, ConfigDict, Field + +from ldp.agent import Agent, DQNAgent, MemoryAgent, ReActAgent +from ldp.alg.optimizer.ape import APEOpt, APEScoreFn +from ldp.alg.optimizer.dqn import DQNOptimizer, DQNOptimizerConfig +from ldp.alg.optimizer.memory import MemoryOpt, PositiveMemoryOpt +from ldp.alg.optimizer.opt import _OPTIMIZER_REGISTRY, ChainedOptimizer, Optimizer + +_DEFAULT_OPTIMIZER_ERROR_MSG = ( + "Didn't yet implement an optimizer of type {opt_type} for {agent_type}." +) + + +class OptimizerConfig(BaseModel): + model_config = ConfigDict(extra="forbid") + + optimizer_type: str + optimizer_kwargs: dict[str, Any] = Field(default_factory=dict) + + +_DEFAULT_OPTIMIZER_MAP: dict[type[Agent], type[Optimizer]] = { + MemoryAgent: MemoryOpt, + DQNAgent: DQNOptimizer, + ReActAgent: APEOpt, +} + + +def default_optimizer_factory( + agent: Agent, optimizer_cls: str | type[Optimizer] | None = None, **optimizer_kwargs +) -> Optimizer: + """A method that constructs a default optimizer for commonly-used agents. + + Args: + agent: Agent to construct the optimizer for. + optimizer_cls: The optimizer class to use. If not specified, we will try a default. + based on the provided agent. + optimizer_kwargs: Arguments forwarded to optimizer_cls. + + Returns: + Instantiated optimizer. 
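+
+    Example (illustrative):
+        agent = MemoryAgent()
+        opt = default_optimizer_factory(agent)  # Resolves to MemoryOpt.from_agent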
+ """ + if isinstance(optimizer_cls, str): + try: + optimizer_cls = _OPTIMIZER_REGISTRY[optimizer_cls] + except KeyError: + raise TypeError( + f"Optimizer class not supported by default_optimizer_factory: {optimizer_cls}" + ) from None + + if optimizer_cls is None: + optimizer_cls = _DEFAULT_OPTIMIZER_MAP.get(agent.__class__) + + # convince mypy that optimizer_cls is a type from here on + optimizer_cls = cast(type, optimizer_cls) + + if isinstance(agent, MemoryAgent): + if optimizer_cls != MemoryOpt: + raise NotImplementedError( + _DEFAULT_OPTIMIZER_ERROR_MSG.format( + opt_type=optimizer_cls.__name__, agent_type=MemoryAgent.__name__ + ) + ) + return MemoryOpt.from_agent(agent, **optimizer_kwargs) + if isinstance(agent, DQNAgent): + if optimizer_cls != DQNOptimizer: + raise NotImplementedError( + _DEFAULT_OPTIMIZER_ERROR_MSG.format( + opt_type=optimizer_cls.__name__, agent_type=DQNAgent.__name__ + ) + ) + return DQNOptimizer.from_agent(agent, **optimizer_kwargs) + if isinstance(agent, ReActAgent): + if optimizer_cls != APEOpt: + raise NotImplementedError( + _DEFAULT_OPTIMIZER_ERROR_MSG.format( + opt_type=optimizer_cls.__name__, agent_type=ReActAgent.__name__ + ) + ) + return APEOpt.from_agent(agent, **optimizer_kwargs) + raise TypeError(f"Unsupported agent type: {agent.__class__.__name__}") + + +__all__ = [ + "APEOpt", + "APEScoreFn", + "ChainedOptimizer", + "DQNOptimizer", + "DQNOptimizerConfig", + "MemoryOpt", + "Optimizer", + "OptimizerConfig", + "PositiveMemoryOpt", + "default_optimizer_factory", +] diff --git a/ldp/alg/optimizer/ape.py b/ldp/alg/optimizer/ape.py new file mode 100644 index 00000000..42068b24 --- /dev/null +++ b/ldp/alg/optimizer/ape.py @@ -0,0 +1,297 @@ +from __future__ import annotations + +import asyncio +import logging +from collections import UserDict +from enum import StrEnum, auto +from typing import Any, Self, cast + +from aviary.message import Message +from pydantic import ( + BaseModel, + ConfigDict, + Field, + JsonValue, + RootModel, + ValidationError, + model_validator, +) + +from ldp.agent import ReActAgent +from ldp.alg.optimizer.opt import Optimizer +from ldp.data_structures import Trajectory +from ldp.graph.common_ops import LLMCallOp, PromptOp +from ldp.graph.ops import OpResult +from ldp.llms import LLMModel, LLMResult + +logger = logging.getLogger(__name__) + + +class APEScoreFn(StrEnum): + # Use the reward as the APE score (as proposed in the paper). + # Goal is to maximize this score. + REWARD = auto() + # Use the gradient of the output of the PromptOp as the APE score. + # Goal is to push this to zero. + GRADIENT = auto() + + +class _FormatDict(UserDict): + """Custom dictionary that stores missing items.""" + + def __init__(self) -> None: + super().__init__() + self.key_set: set[str] = set() + + def __missing__(self, key: str) -> str: + self.key_set.add(key) + return key + + +def get_formatted_variables(s: str) -> set[str]: + """Returns the set of variables implied by the format string.""" + format_dict = _FormatDict() + s.format_map(format_dict) + return format_dict.key_set + + +class OutputPrompt(BaseModel): + prompt: str = Field(description="Prompt for language model") + + +class Example(BaseModel): + input: JsonValue + output: JsonValue + score: float + + +ExampleList = RootModel[list[Example]] + + +class APEOpt(BaseModel, Optimizer): + """ + Basic optimizer that acts as an Automatic Prompt Engineer (APE). + + Paper: https://openreview.net/pdf?id=92gvk82DE- + + Details: + - This implements the "forward mode generation" strategy. 
+ - The score function used is the gradient (float) at the output of the + PromptOp being optimized. A zero gradient means the prompt was "good", + and a non-zero gradient means we can learn from the prompt. + - Possible improvements include: + - Extending the score function to the LLM result's logprobs + - Iterating with Monte Carlo Search + - Use of memory for further example augmentation + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + ### Configuration + max_examples: int | None = Field( + default=50, # Comes from APE paper section 5.2 + description=( + "Max number of examples to include in the below query_prompt, or None for" + " no limit. The paper mentions that more examples produce better prompt" + " proposals." + ), + ) + system_prompt: str = ( + "We are optimizing prompts for a language model." + " The model sees a prompt, an input, and then generates an output." + ) + query_prompt: str = ( + "Here are correct example outputs that the language model and prompt should produce:" + "\n{good_examples}" + '\n\nThe current prompt is: "{prompt}"' + "\n\nWhich resulted in the following incorrect input, output, and {score}:" + "\n{examples}" + "\n\nRevise the current prompt to improve the outputs." + " Your proposed prompt should be concise, correct, and specify the desired output format." + ) + llm: LLMModel = Field( + default_factory=LLMModel, + description=( + "LLM used to update the prompt inside the PromptOp. The paper mentions that" + " larger models produce better prompt proposals." + ), + ) + + prompt_op: PromptOp = Field(description="PromptOp to be optimized.") + llm_call_op: LLMCallOp = Field(description="LLMCallOp to be optimized.") + + score_fn: APEScoreFn = APEScoreFn.REWARD + good_reward_threshold: float | None = Field( + default=None, + description="If using reward as the score_fn, then a good example is defined by " + "reward>=good_reward_threshold.", + ) + reward_discount: float = 1.0 + + ### State + examples: list[Example] = Field(default_factory=list) + good_examples: list[Example] = Field(default_factory=list) + steps: int = 0 + trace: list[str] = Field( + default_factory=list, description="History of prompts used." + ) + + @model_validator(mode="after") + def validate_score_fn(self): + if self.score_fn == APEScoreFn.REWARD: + if self.good_reward_threshold is None: + raise ValueError( + "good_reward_threshold must be set if using reward as the score function" + ) + self._score_str = "rewards" + elif self.score_fn == APEScoreFn.GRADIENT: + # The gradient into the prompt op is the (signed) backpropagated error, and "gradient" would + # be confusing to the model in the prompt. 
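+            # _score_str fills the {score} placeholder when query_prompt is
+            # formatted in _get_updated_prompt.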
+ self._score_str = "errors" + else: + raise ValueError(f"Invalid score function {self.score_fn}") + + return self + + def model_post_init(self, __context: Any) -> None: + if self.prompt_op.prompt not in self.trace: + self.trace.append(self.prompt_op.prompt) + + # Make sure updates are not run concurrently + self._update_lock = asyncio.Lock() + + @classmethod + def from_agent(cls, agent: ReActAgent, **kwargs) -> Self: + return cls( + llm_call_op=agent._react_module.tool_select_module.llm_call_op, + prompt_op=agent._react_module.prompt_op, + **kwargs, + ) + + def aggregate_trajectory(self, trajectory: Trajectory) -> None: + if trajectory.failed: + return + + if self.score_fn == APEScoreFn.REWARD: + d_returns = trajectory.compute_discounted_returns(self.reward_discount) + + for i_step, step in enumerate(trajectory.steps): + action_call_id = cast(OpResult, step.action).call_id + llm_call_ids = self.llm_call_op.get_call_ids({action_call_id.run_id}) + + if self.score_fn == APEScoreFn.GRADIENT: + prompt_call_id, *extra_prompt_call_ids = self.prompt_op.get_call_ids({ + action_call_id.run_id + }) + # TODO: loosen this restriction once grad acc/topological traversal are done + assert ( + not extra_prompt_call_ids + ), "APE only supports one prompt call per run" + + for llm_call_id in llm_call_ids: + result = cast( + LLMResult | None, + self.llm_call_op.ctx.get(llm_call_id, "result"), + ) + if result is None or not result.messages or not result.prompt: + continue + # (x: first prompt's user message's content, y: AI response's content) + x = next( + cast(str, m.content) for m in result.prompt if m.role == "user" + ) + y = cast(str, result.messages[0].content) + + if self.score_fn == APEScoreFn.GRADIENT: + score = self.prompt_op.ctx.get( + prompt_call_id, "grad_output", default=None + ) + if score is None: + # backprop did not reach this op call - move on + continue + is_good = score == 0 + else: + score = d_returns[i_step] # pylint: disable=possibly-used-before-assignment + is_good = score >= cast(float, self.good_reward_threshold) + + example = Example(input=x, output=y, score=score) + (self.good_examples if is_good else self.examples).append(example) + + async def update(self) -> None: + async with self._update_lock: + if not self.examples: + raise ValueError("No examples to update the prompt with.") + + new_p = await self._get_updated_prompt( + self.examples, self.good_examples, self.prompt_op.prompt + ) + # Check any template vars remain, and if some were added or + # lost, discard this new prompt + if new_p != self.prompt_op.prompt and get_formatted_variables( + new_p + ) != get_formatted_variables(self.prompt_op.prompt): + logger.warning( + "Update broke prompt templating." 
+ f"\n\nNew prompt:\n{new_p}" + f"\n\nPrior prompt:\n{self.prompt_op.prompt}" + ) + else: + if new_p == self.prompt_op.prompt: + logger.warning("Update did not change the prompt.") + self.examples.clear() + self.prompt_op.prompt = new_p + self.trace.append(new_p) + self.steps += 1 + + def _prepare_examples(self, examples: list[Example]) -> str: + if not examples: + return "" + if self.max_examples and len(examples) > self.max_examples: + if self.score_fn == APEScoreFn.GRADIENT: + # Return examples closest to decision boundary, + # aka ones with the lowest L1-normalized error + # NOTE: this pairs with our setting of Example.score = PromptOp's output + # gradient inside the update method, so examples with error values closer to + # 0 are defined to be higher quality + examples = sorted(examples, key=lambda e: abs(e.score))[ + : self.max_examples + ] + else: + # In reward mode, we want to show the examples with the highest reward, per the paper + # TODO: consider whether uniform sampling is better + examples = sorted(examples, key=lambda e: -e.score)[: self.max_examples] + return ExampleList.model_validate(examples).model_dump_json() + + async def _get_updated_prompt( + self, examples: list[Example], good_examples: list[Example], prompt: str + ) -> str: + messages = [ + Message( + role="system", + content=self.system_prompt, + ), + Message( + role="user", + content=self.query_prompt.format( + examples=self._prepare_examples(examples), + good_examples=self._prepare_examples(good_examples), + prompt=prompt, + score=self._score_str, + ), + ), + Message( + role="assistant", + content=OutputPrompt(prompt=prompt).model_dump_json(indent=2), + ), + Message( + content=( + "You responded without changing the prompt. Don't forget to revise" + " the prompt." + ) + ), + ] + result = await self.llm.call(messages, output_type=OutputPrompt) + message_content = cast(str, cast(list[Message], result.messages)[-1].content) + try: + return OutputPrompt.model_validate_json(message_content).prompt + except ValidationError: + return prompt diff --git a/ldp/alg/optimizer/dqn.py b/ldp/alg/optimizer/dqn.py new file mode 100644 index 00000000..208f9f8c --- /dev/null +++ b/ldp/alg/optimizer/dqn.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +import asyncio +import itertools +import random +from collections.abc import Sequence +from copy import deepcopy +from enum import StrEnum +from math import ceil +from typing import Any, Self, cast +from uuid import UUID + +import torch +import torch.optim +from pydantic import BaseModel, ConfigDict, Field, model_validator +from torch.nn import functional as F # noqa: N812 + +from ldp.agent import DQNAgent +from ldp.alg.algorithms import discounted_returns +from ldp.alg.optimizer.opt import Optimizer +from ldp.alg.optimizer.replay_buffers import CircularReplayBuffer +from ldp.data_structures import Trajectory, Transition +from ldp.graph.modules import DQNOp, DQNPolicyModule +from ldp.graph.op_utils import eval_mode +from ldp.graph.ops import OpResult + +try: + import wandb +except ImportError: + wandb = None # type: ignore[assignment] + + +class DQNTarget(StrEnum): + Q = "Q" # Standard Bellman target + SARSA = "SARSA" # SARSA target + MC_SARSA = "MC_SARSA" # Monte Carlo SARSA target (discounted cumulative return) + + +class DQNOptimizerConfig(BaseModel): + model_config = ConfigDict(extra="forbid") + + lr: float = 0.001 + num_update_epochs: int = Field( + 1, + description="Number of passes through the train buffer " + "for every update() call.", + ) + batch_size: int = 32 
+ gradient_checkpointing: bool = Field( + default=False, + description="Only supported for transformers models.", + ) + target: DQNTarget = DQNTarget.Q + + eval_every: int | float | None = Field( + None, + description="Check validation loss every eval_every steps. " + "If None, don't run validation. If a float in (0, 1], " + "run validation every eval_every fraction of the train buffer.", + ) + early_stopping_tolerance: float = Field( + 0.1, + description="If the validation loss increases by more than this much, " + "early stop. Default is 10%. ", + ) + + ignore_truncated: bool = Field( + default=False, + description="If True, do not train on any transitions from a " + "truncated trajectory. This can make sense if rewards are not emitted " + "until the trajectory finishes.", + ) + train_buffer_size: int | None = Field( + 1_000, + description="Size of the replay buffer used to train the Q network. " + "If None, the buffer can grow arbitrarily large. ", + ) + val_buffer_size: int | None = Field( + 100, + description="Size of the replay buffer used for evaluating the Q network." + "If None, the buffer can grow arbitrarily large. ", + ) + val_frac: float | None = Field( + None, + description="Fraction of aggregated trajectories that will be added to the " + "val buffer. If not set, will attempt to infer from [val,train]_buffer_sizes.", + ) + + continual_training: bool = Field( + default=True, + description="If True (default), will continually train the DQN " + "across update() calls. If False, will reset the DQN on each " + "update() call and retrain from scratch using the train buffer.", + ) + + reward_discount: float = 1.0 + soft_update_tau: float = Field( + 1.0, + description="Update coefficient for the target network if performing Q learning. " + "1.0 means we use the policy network at the end of the previous update() as the " + "target network for the next update().", + ) + + optimizer: str = "SGD" + optimizer_kwargs: dict[str, Any] = Field(default_factory=dict) + + @model_validator(mode="after") + def set_val_sizes(self): + if self.eval_every is None: + # don't need to store anything in the val buffer if we're + # not running eval + self.val_buffer_size = 0 + self.val_frac = 0.0 + + if self.val_frac is None: + if self.val_buffer_size is None or self.train_buffer_size is None: + raise ValueError( + "If val_frac is not set, val_buffer_size and train_buffer_size " + "must be set." + ) + self.val_frac = self.val_buffer_size / ( + self.val_buffer_size + self.train_buffer_size + ) + + return self + + +class DQNOptimizer(Optimizer): + def __init__( + self, + policy: DQNPolicyModule, + config: DQNOptimizerConfig | None = None, + log_to_wandb: bool = False, + ): + if config is None: + config = DQNOptimizerConfig() + self.config = config + + if log_to_wandb and wandb is None: + raise ImportError( + "If logging to wandb, please install its package via `pip install wandb`." 
+ ) + self.log_to_wandb = log_to_wandb + + self.policy = policy + + # A buffer element consists of: + # - s_t, a_t + # - r_{t+1} + # - all the (s_{t+1}, a_{t+1}) candidates considered for the next transition + # - other metadata like the value estimate when sampling + self.train_buffer = CircularReplayBuffer() + self.val_buffer = CircularReplayBuffer() + + if not self.config.continual_training: + # Store a copy of the state dict so we can reset the model + self.init_state_dict = deepcopy(self.policy.dqn.network.state_dict()) + + optimizer_cls = getattr(torch.optim, self.config.optimizer) + self.optimizer = optimizer_cls( + self.dqn_op.network.parameters(), + lr=self.config.lr, + **self.config.optimizer_kwargs, + ) + # Define the model's device/dtype from a random parameter - we assume they're all the same + param = next(self.dqn_op.network.parameters()) + self.device = param.device + self.dtype = param.dtype + + if self.config.gradient_checkpointing: + try: + self.dqn_op.network.gradient_checkpointing_enable() + except AttributeError as e: + raise ValueError( + "Gradient checkpointing was requested, but we do not " + f"support it for {self.dqn_op.network.__class__.__name__}" + ) from e + + @classmethod + def from_agent(cls, agent: DQNAgent, **kwargs) -> Self: + return cls(policy=agent._dqn_policy, **kwargs) + + @property + def dqn_op(self) -> DQNOp: + return self.policy.dqn + + def _update_target_network(self) -> None: + tau = self.config.soft_update_tau + target_state_dict = self.dqn_op.target_network.state_dict() + for name, param in self.dqn_op.network.named_parameters(): + target_param = target_state_dict[name] + # write it it out this way so we're doing in-place operations + # The actual math is target_param = tau * param + (1 - tau) * target_param + target_param.data *= 1 - tau + target_param.data += tau * param.data + + def aggregate_trajectory(self, trajectory: Trajectory) -> None: # noqa: C901 + """Add training examples to the replay buffers.""" + if trajectory.failed or not trajectory.steps: + return + + if self.config.ignore_truncated and trajectory.steps[-1].truncated: + return + + # use the same buffer for all transitions in this trajectory to avoid + # train/val leakage. Remember, we want to generalize to unseen trajectories. 
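+        # val_frac is guaranteed non-None here: DQNOptimizerConfig.set_val_sizes
+        # resolves it at validation time, hence the cast below.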
+ buffer = ( + self.val_buffer + if random.uniform(0, 1) < cast(float, self.config.val_frac) + else self.train_buffer + ) + + steps = cast(list[Transition | None], [*trajectory.steps, None]) + d_returns = discounted_returns( + [step.reward for step in trajectory.steps], + [step.done for step in trajectory.steps], + self.config.reward_discount, + ) + + for (step, next_step), d_return in zip( + itertools.pairwise(steps), d_returns, strict=True + ): + step = cast(Transition, step) + + if step.truncated: + break + + if next_step is None: + assert step.done + + # checking trajectory.failed ensures this is an OpResult + run_id = cast(OpResult, step.action).call_id.run_id + + if self.config.target == DQNTarget.MC_SARSA: + # Don't need next_state_action_cands - avoid ctx lookup + next_state_action_cands: list[str] | None = None + elif step.done: + # Last step, so the target is just r_t+1 + next_state_action_cands = [] + else: + next_state_action_cands = self._get_state_action_cands( + cast(OpResult, cast(Transition, next_step).action).call_id.run_id + ) + + dqn_call_ids = self.dqn_op.get_call_ids({run_id}) + for dqn_call_id in dqn_call_ids: + if ( + self.dqn_op.ctx.get(dqn_call_id, "grad_output", default=None) + is None + ): + # This op call was pruned from backward compute graph - skip. + continue + + tensor_args, tensor_kwargs = self.dqn_op.ctx.get( + dqn_call_id, DQNOp.CTX_TENSOR_INPUT_KEY + ) + + buffer.append({ + # (s_t, a_t) + "input_args": tensor_args, + "input_kwargs": tensor_kwargs, + # r_{t+1} + "reward": step.reward, + # all the (s_{t+1}, a_{t+1}) candidates considered for the next transition + "next_state_action_cands": next_state_action_cands, + # other metadata like the value estimate when sampling + "q": step.value, + "discounted_return": d_return, + }) + + if self.config.train_buffer_size: + self.train_buffer.resize(self.config.train_buffer_size) + if self.config.val_buffer_size: + self.val_buffer.resize(self.config.val_buffer_size) + + async def update(self): # noqa: C901 + num_samples = len(self.train_buffer) + if num_samples < self.config.batch_size: + return + + if self.log_to_wandb: + wandb.log({"dqn/train_buffer_size": num_samples}) + + num_steps_per_epoch = ceil(num_samples / self.config.batch_size) + num_train_steps = self.config.num_update_epochs * num_steps_per_epoch + + val_every = self.config.eval_every or 0 + if 0 < val_every <= 1: + val_every = int(val_every * num_steps_per_epoch) + + best_val_loss = float("inf") + best_ckpt: dict | None = None + + if not self.config.continual_training: + self.dqn_op.network.load_state_dict(self.init_state_dict) + + for batch_num, batch in enumerate( + self.train_buffer.batched_iter( + self.config.batch_size, + infinite=True, # will count the number of batches manually + ), + start=1, + ): + tensor_args, tensor_kwargs = self._collate_fn( + batch["input_args"], batch["input_kwargs"] + ) + targets = await self._compute_targets(batch) + + tensor_args, tensor_kwargs, targets = self._move_tensors( + tensor_args, tensor_kwargs, targets + ) + + self.optimizer.zero_grad() + self.dqn_op.network.train() + with torch.autocast(device_type=self.device.type, dtype=self.dtype): + loss = self._compute_loss(tensor_args, tensor_kwargs, targets) + loss.backward() + self.optimizer.step() + + if self.log_to_wandb: + qs = torch.tensor(batch["q"]) + returns = torch.tensor(batch["discounted_return"]) + wandb.log({ + "dqn/minibatch_loss": loss.item(), + "dqn/minibatch_empirical_error": F.mse_loss(qs, returns).item(), + }) + + if val_every and self.val_buffer 
and batch_num % val_every == 0:
+                val_loss = await self.run_val_loop(_internal_call=True)
+                if val_loss < best_val_loss:
+                    best_val_loss = val_loss
+                    # deepcopy: state_dict() returns live references to the
+                    # parameters, so a plain copy would track subsequent updates
+                    best_ckpt = deepcopy(self.dqn_op.network.state_dict())
+                elif (
+                    val_loss
+                    > (1 + self.config.early_stopping_tolerance) * best_val_loss
+                ):
+                    # Early stopping. best_ckpt will not be None if best_val_loss is finite.
+                    self.dqn_op.network.load_state_dict(cast(dict, best_ckpt))
+                    if self.log_to_wandb:
+                        n_epochs = (
+                            batch_num * self.config.batch_size / len(self.train_buffer)
+                        )
+                        wandb.log({"dqn/early_stopping_epoch": n_epochs})
+                    break
+
+            if batch_num == num_train_steps:
+                break
+
+        if best_ckpt is not None:
+            # Reset to best ckpt, in case it was from an earlier epoch and ES didn't kick in
+            self.dqn_op.network.load_state_dict(best_ckpt)
+
+        # TODO: should we do this here? Or per-epoch? Or per-batch? Unclear what to do in this
+        # semi-offline setting. Maybe experiment.
+        self._update_target_network()
+
+    @torch.no_grad()
+    async def run_val_loop(self, _internal_call: bool = False) -> float:
+        # _internal_call is set to True if this is being called by the optimizer, in which case
+        # we want to log things.
+        losses = []
+
+        if _internal_call and self.log_to_wandb:
+            wandb.log({"dqn/val_buffer_size": len(self.val_buffer)})
+
+        for batch in self.val_buffer.batched_iter(self.config.batch_size):
+            tensor_args, tensor_kwargs = self._collate_fn(
+                batch["input_args"], batch["input_kwargs"]
+            )
+            targets = await self._compute_targets(batch)
+
+            tensor_args, tensor_kwargs, targets = self._move_tensors(
+                tensor_args, tensor_kwargs, targets
+            )
+
+            with torch.autocast(device_type=self.device.type, dtype=self.dtype):
+                loss = self._compute_loss(tensor_args, tensor_kwargs, targets)
+
+            losses.append(loss.item())
+
+        losses_tensor = torch.tensor(losses)
+        val_loss, val_loss_std = losses_tensor.mean().item(), losses_tensor.std().item()
+        if _internal_call and self.log_to_wandb:
+            wandb.log({
+                "dqn/val_loss": val_loss,
+                "dqn/val_loss_std": val_loss_std,
+            })
+
+        return val_loss
+
+    def _get_state_action_cands(self, run_id: UUID) -> list[str]:
+        actor_op = self.policy.action_selector
+        call_id, *extra = actor_op.get_call_ids({run_id})
+        if extra:
+            raise RuntimeError(
+                "DQNOptimizer cannot handle an actor op that was called more than once "
+                "in a compute graph."
+            )
+
+        state_actions = cast(list[str], actor_op.ctx.get(call_id, "state_actions"))
+
+        if self.config.target == DQNTarget.Q:
+            # Off-policy: need to see the whole action space (or a set of unbiased samples
+            # from it).
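+            # Illustrative: if state_actions were ["s|a1", "s|a2", "s|a3"], the Q
+            # target would max over all three candidates, whereas the on-policy
+            # branch below keeps only the candidate at i_selected.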
+ return state_actions + + # On-policy: only need the selected action + i_selected = cast(int, actor_op.ctx.get(call_id, "i_selected")) + return [state_actions[i_selected]] + + async def _compute_targets(self, batch: dict[str, list[Any]]) -> torch.Tensor: + with self.dqn_op.use_target_network(): + targets = await asyncio.gather( + *list( + itertools.starmap( + self._compute_target, + zip( + batch["reward"], + batch["discounted_return"], + batch["next_state_action_cands"], + strict=True, + ), + ) + ) + ) + + return torch.tensor(targets, dtype=torch.float32)[:, None] + + @eval_mode() + async def _compute_target( + self, + r_tp1: float, + discounted_return: float, + next_state_action_cands: Sequence[str], + ) -> float: + if self.config.target == DQNTarget.MC_SARSA: + return discounted_return + + if not next_state_action_cands: + # This is the last step, so there is no next state + return r_tp1 + + # We use the Bellman equation to compute the target + # Q(s_t, a_t) = r_{t+1} + gamma * max_a Q(s_{t+1}, a) + # where a is from our set of candidates + q_tp1s = await asyncio.gather(*[ + self.dqn_op(s_a) for s_a in next_state_action_cands + ]) + + # TODO: add DDQN + q_tp1 = max(q.value for q in q_tp1s) + + return r_tp1 + self.config.reward_discount * q_tp1 + + def _move_tensors( + self, tensor_args, tensor_kwargs, target_tensor + ) -> tuple[list[torch.Tensor], dict[str, torch.Tensor], torch.Tensor]: + tensor_args = [arg.to(device=self.device) for arg in tensor_args] + tensor_kwargs = { + k: kwarg.to(device=self.device) for k, kwarg in tensor_kwargs.items() + } + target_tensor = target_tensor.to(device=self.device) + + return tensor_args, tensor_kwargs, target_tensor + + def _collate_fn( + self, + input_args: list[list[torch.Tensor]], + input_kwargs: list[dict[str, torch.Tensor]], + ) -> tuple[list[torch.Tensor], dict[str, torch.Tensor]]: + tensor_args = [ + torch.stack([inp[i] for inp in input_args], dim=0) + # use the first element to find the number of args + for i in range(len(input_args[0])) + ] + + tensor_kwargs = { + key: torch.stack([inp[key] for inp in input_kwargs], dim=0) + # use the first element to find the keys + for key in input_kwargs[0] + } + + return tensor_args, tensor_kwargs + + def _compute_loss( + self, + tensor_args: list[torch.Tensor], + tensor_kwargs: dict[str, torch.Tensor], + targets: torch.Tensor, + ) -> torch.Tensor: + pred = self.dqn_op.network(*tensor_args, **tensor_kwargs) + return F.mse_loss(pred, targets) diff --git a/ldp/alg/optimizer/memory.py b/ldp/alg/optimizer/memory.py new file mode 100644 index 00000000..516516be --- /dev/null +++ b/ldp/alg/optimizer/memory.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +import logging +from itertools import product +from typing import Self, cast + +from pydantic import BaseModel, ConfigDict, Field + +from ldp.agent import MemoryAgent +from ldp.alg.optimizer.opt import Optimizer +from ldp.data_structures import Trajectory +from ldp.graph.common_ops import MemoryOp +from ldp.graph.memory import Memory +from ldp.graph.op_utils import CallID +from ldp.graph.ops import Op, OpResult + +logger = logging.getLogger(__name__) + + +class MemoryOpt(BaseModel, Optimizer): + """Trainer for memory agents. By default it is a minimizer. + + We simply store the memories in the memory op with their gradient. 
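+
+    A minimal usage sketch (hedged; `agent` is assumed to be a MemoryAgent and
+    `trajectories` a list of collected Trajectory objects - illustrative names):
+
+    ```python
+    opt = MemoryOpt.from_agent(agent)
+    opt.aggregate(trajectories)
+    await opt.update()  # run inside an event loop
+    ```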
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    ### Configuration
+    memory_op: MemoryOp
+    output_op: Op
+    reward_discount: float = 1.0
+
+    ### State
+    steps: int = 0
+    example_buffer: list[tuple[CallID, CallID, float]] = Field(default_factory=list)
+
+    @classmethod
+    def from_agent(cls, agent: MemoryAgent, **kwargs) -> Self:
+        return cls(memory_op=agent._memory_op, output_op=agent._llm_call_op, **kwargs)
+
+    def _memory_filter(
+        self, call_id: CallID, memory_op: MemoryOp, d_return: float
+    ) -> bool:
+        # only keep memories that backprop reached, i.e. those that were used in
+        # achieving the reward
+        return memory_op.ctx.get(call_id, "grad_output", default=None) is not None
+
+    def aggregate_trajectory(self, trajectory: Trajectory) -> None:
+        # NOTE: this is a little dangerous. This optimizer currently
+        # does not check which memory op calls are upstream of output op calls,
+        # besides making sure they belong to the same run.
+        # This is not a problem if we have no branching in the compute graph
+        # between the memory op and the *final* output op.
+        # TODO: fix the above using OpResult.traverse() to find the upstream calls
+
+        if trajectory.failed:
+            return
+
+        d_returns = trajectory.compute_discounted_returns(self.reward_discount)
+
+        for step, d_return in zip(trajectory.steps, d_returns, strict=True):
+            output = cast(OpResult, step.action)
+            mem_call_ids = self.memory_op.get_call_ids({output.call_id.run_id})
+            mem_call_ids = {
+                m
+                for m in mem_call_ids
+                if self._memory_filter(m, self.memory_op, d_return)
+            }
+            output_call_ids = self.output_op.get_call_ids({output.call_id.run_id})
+            if len(mem_call_ids) > 1 and len(output_call_ids) > 1:
+                raise ValueError(
+                    "Multiple memory and output calls in a single run - this violates our 1-1 correspondence assumption."
+                )
+
+            self.example_buffer.extend(
+                (*x, d_return) for x in product(mem_call_ids, output_call_ids)
+            )
+
+    async def update(self) -> None:
+        """Create new memories from the example buffer and add them to MemoryOp."""
+        new_memories = []
+        for mem_call_id, output_call_id, d_return in self.example_buffer:
+            query = self.memory_op.ctx.get(mem_call_id, "query")
+            input = self.memory_op.ctx.get(mem_call_id, "memory_input")  # noqa: A001
+            new_memories.append(
+                # why do we want this gradient and not memory_op's grad output?
+                Memory(
+                    query=query,
+                    input=input if input is not None else query,
+                    output=str(self.output_op.ctx.get(output_call_id, "output").value),
+                    value=d_return,
+                    run_id=output_call_id.run_id,
+                    template="Input: {input}\nOutput: {output}\nReward: {value}",
+                )
+            )
+
+        for memory in new_memories:
+            await self.memory_op.memory_model.add_memory(memory)
+            self.steps += 1
+        self.example_buffer.clear()
+
+
+class PositiveMemoryOpt(MemoryOpt):
+    def _memory_filter(
+        self, call_id: CallID, memory_op: MemoryOp, d_return: float
+    ) -> bool:
+        # only keep positive memories
+        return d_return > 0 and super()._memory_filter(call_id, memory_op, d_return)
diff --git a/ldp/alg/optimizer/openai_sft_optimizer.py b/ldp/alg/optimizer/openai_sft_optimizer.py
new file mode 100644
index 00000000..00ce6eee
--- /dev/null
+++ b/ldp/alg/optimizer/openai_sft_optimizer.py
@@ -0,0 +1,309 @@
+"""This module defines an expert iteration optimizer for black-box OpenAI LLMs.
+
+The optimizer manages the collation and formatting of training rollout data and initiates fine-tuning jobs through
+OpenAI's API:
+
+https://platform.openai.com/docs/guides/fine-tuning/analyzing-your-fine-tuned-model
+
+For expert iteration see:
+
+Havrilla et al. 2024. Teaching large language models to reason with reinforcement learning.
+arXiv preprint arXiv:2403.04642. (https://arxiv.org/pdf/2403.04642)
+
+Example Usage:
+    - Instantiate the `OpenAISFTOpt` with the necessary configuration.
+    - Accumulate training rollout examples by calling `aggregate_trajectory`.
+    - Update the model by invoking `update`, which prepares the training data,
+      uploads it, and triggers the fine-tuning process.
+"""
+
+import asyncio
+import json
+import logging
+import tempfile
+from collections.abc import Callable
+from typing import Any, Self, cast
+
+import openai
+from pydantic import BaseModel, ConfigDict, Field
+
+from ldp.agent import ReActAgent
+from ldp.alg.optimizer.opt import Optimizer
+from ldp.data_structures import Trajectory
+from ldp.graph.common_ops import LLMCallOp
+from ldp.graph.ops import OpResult
+
+logger = logging.getLogger(__name__)
+
+
+class OpenAISFTOptConfig(BaseModel):
+    """Configuration class for the OpenAISFTOpt optimizer.
+
+    This class holds various configuration parameters for the optimizer.
+    """
+
+    lr: float = 0.001
+    num_epochs: int = 1
+    buffer_size: int | None = Field(
+        default=None,
+        description="Maximum number of finetuning examples to accumulate. "
+        "If None, the buffer has no size limit.",
+    )
+    val_frac: float = 0.1
+    reward_discount: float = 1.0  # Discount factor in [0, 1] for rewards
+    return_threshold: float | None = Field(
+        default=None,
+        description="Minimum return required for a trajectory to be added to the training buffer. If None, "
+        "all trajectories are added.",
+    )
+
+
+class OpenAISFTOpt(BaseModel, Optimizer):
+    """An optimizer for finetuning black-box LLMs that interact via an API.
+
+    Expert Iteration (SFT) optimizer for fine-tuning black-box OpenAI LLMs.
+    It handles the aggregation of training data, manages a buffer of training examples,
+    and initiates fine-tuning jobs via the API.
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    # Configuration
+    config: OpenAISFTOptConfig = Field(default_factory=OpenAISFTOptConfig)
+    log_to_wandb: bool = False
+    llm_call_op: LLMCallOp
+    client: openai.OpenAI = Field(default_factory=openai.OpenAI)
+
+    # State
+    train_buffer: list = Field(default_factory=list)
+    val_buffer: list = Field(default_factory=list)
+    fine_tune_job_id: str | None = None
+
+    train_dataset: list[Any] | None = None
+    val_dataset: list[Any] | None = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+
+        # Validate and populate the training and validation buffers
+        if self.train_dataset:
+            self.train_buffer.extend(self.train_dataset)
+        if self.val_dataset:
+            self.val_buffer.extend(self.val_dataset)
+
+    @property
+    def buffer_is_full(self) -> bool:
+        return (
+            self.config.buffer_size is not None
+            and len(self.train_buffer) >= self.config.buffer_size
+        )
+
+    def aggregate_trajectory(
+        self,
+        trajectory: Trajectory,
+        buffer_type: str = "train",
+        len_penalty_fn: Callable[[int], float] | None = None,
+    ) -> None:
+        """Adds training rollout examples from a trajectory to the training buffer.
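+
+        A hedged sketch of a length penalty (the optimizer name `opt` and the 1%
+        per-step decay are illustrative, not part of this API):
+
+        ```python
+        # Shrink the trajectory's discounted return by 1% per step taken
+        opt.aggregate_trajectory(traj, len_penalty_fn=lambda n: 0.99**n)
+        ```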
+ + This method extracts rollouts and their corresponding discounted returns from a trajectory and stores them + in the appropriate buffer (training or validation) if they meet the return threshold criteria. + + We apply a weight of 1 to actions and a weight of 0 to states. This reflects the fact that we want to train the + agent using P(action | state) as the target distribution. Note that in the OpenAI API, the weight may only be + applied to assistant messages. + + Args: + trajectory: The trajectory containing rollouts and rewards. + buffer_type: The buffer to which the trajectory should be added. Must be either "train" or "validation". + len_penalty_fn: An optional callable that takes an integer (the length of + the list of discounted returns) and returns a scalar penalty to be applied to the discounted return. + + Raises: + RuntimeError: If a rollout in the trajectory does not have an associated compute graph. + ValueError: If the supplied buffer type is invalid. Must be either "train" or "validation". + """ + # Validate buffer type + if buffer_type not in {"train", "validation"}: + raise ValueError('buffer_type must be either "train" or "validation".') + + # Compute the discounted returns + discounted_returns = trajectory.compute_discounted_returns( + self.config.reward_discount + ) + + # Apply the penalty on the length of the trajectory if a penalty function is provided + if len_penalty_fn is not None: + penalty = len_penalty_fn(len(discounted_returns)) + modified_return = discounted_returns[0] * penalty + else: + modified_return = discounted_returns[0] + + # Don't add trajectory to the buffer if it failed or doesn't meet the return threshold + if trajectory.failed or ( + self.config.return_threshold is not None + and modified_return < self.config.return_threshold + ): + return + + traj_msgs = [] + for step in trajectory.steps: + action_call_id = cast(OpResult, step.action).call_id + if action_call_id is None: + raise RuntimeError("Received an action without compute graph attached.") + call_ids = self.llm_call_op.get_call_ids({action_call_id.run_id}) + for call_id in call_ids: + if ( + self.llm_call_op.ctx.get(call_id, "grad_output", default=None) + is None + ): + # This op call was pruned from backward compute graph - skip. + continue + + _, input_kwargs = self.llm_call_op.ctx.get(call_id, "input") + outputs = self.llm_call_op.ctx.get(call_id, "output").value.model_dump() + + # Add "weight": 1 to the outputs dictionary. NB: weight should ONLY be added to assistant messages. All + # output messages are assumed to be assistant messages and will throw an error otherwise. + outputs["weight"] = 1 + + # Just supply list of messages here. Call model_dump on each element of list. Add weight = 0 for input + traj_msgs += [ + { + **msg.model_dump(), + **({"weight": 0} if msg.role == "assistant" else {}), + } + for msg in OpResult.unwrap_value(input_kwargs["msgs"]) + ] + traj_msgs.append(outputs) + + # Choose the appropriate buffer + target_buffer = self.train_buffer if buffer_type == "train" else self.val_buffer + + # Add trajectory to the specified buffer. Buffer is List[List[dict]] + target_buffer.append(traj_msgs) + + # If buffer size is set, ensure that the buffer does not exceed the specified size. If it does exceed the size + # remove the oldest samples. 
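+        # e.g. (illustrative): with buffer_size=100 and 103 stored trajectories,
+        # the 3 oldest are sliced off and the newest 100 kept.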
+        if (
+            self.config.buffer_size is not None
+            and len(target_buffer) >= self.config.buffer_size
+        ):
+            # Calculate the starting index for slicing
+            start_index = len(target_buffer) - self.config.buffer_size
+            # Assign the last `buffer_size` elements to `target_buffer`
+            target_buffer[:] = target_buffer[start_index:]
+
+    async def update(self, check_for_completion: bool = False):
+        """Updates the model parameters based on the accumulated training data.
+
+        This method processes the accumulated training data by formatting it into the appropriate structure for the
+        API, uploads it, and then initiates a fine-tuning job. It is important to note that the OpenAI finetuning API
+        has a minimum requirement of 10 training examples (trajectories) to perform fine-tuning.
+
+        Args:
+            check_for_completion: A flag to indicate whether to check for the completion of the fine-tuning job.
+
+        Raises:
+            ValueError: If the training data fails to upload or the fine-tuning job fails to start.
+        """
+        # Prepare the data for fine-tuning in chat format
+        training_data = [{"messages": traj} for traj in self.train_buffer]
+        validation_data = (
+            [{"messages": traj} for traj in self.val_buffer]
+            if self.val_buffer
+            else None
+        )
+
+        if not training_data:
+            return
+
+        def write_to_tempfile(data):
+            with tempfile.NamedTemporaryFile(
+                delete=False, suffix=".jsonl"
+            ) as temp_file:
+                for example in data:
+                    temp_file.write((json.dumps(example) + "\n").encode("utf-8"))
+            return temp_file.name
+
+        train_temp_file_path = write_to_tempfile(training_data)
+        val_temp_file_path = (
+            write_to_tempfile(validation_data) if validation_data else None
+        )
+
+        try:
+            with open(train_temp_file_path, "rb") as train_file:
+                file_id = self.client.files.create(
+                    file=train_file, purpose="fine-tune"
+                ).id
+
+            val_file_id = None
+            if val_temp_file_path:
+                with open(val_temp_file_path, "rb") as val_file:
+                    val_file_id = self.client.files.create(
+                        file=val_file, purpose="fine-tune"
+                    ).id
+
+            fine_tune_job = self.client.fine_tuning.jobs.create(
+                training_file=file_id,
+                validation_file=val_file_id,
+                model="gpt-3.5-turbo",
+            )
+
+            self.fine_tune_job_id = fine_tune_job.id
+            logger.info(f"Fine-tuning job created with ID: {self.fine_tune_job_id}")
+
+            # Check the status of the job periodically until it completes
+            if check_for_completion:
+                while True:
+                    job_status = self.client.fine_tuning.jobs.retrieve(
+                        self.fine_tune_job_id
+                    )
+                    status = job_status.status
+
+                    if status == "succeeded":
+                        logger.info("Fine-tuning job succeeded.")
+                        break
+                    if status == "failed":
+                        logger.error(
+                            f"Fine-tuning job failed with status: {job_status}"
+                        )
+                        raise ValueError(
+                            f"Fine-tuning job failed with status: {job_status}"
+                        )
+                    logger.info(
+                        f"Fine-tuning job is still running. Current status: {status}"
+                    )
+                    # Wait 30 seconds before checking again, without blocking the event loop
+                    await asyncio.sleep(30)
+
+        except (openai.APIConnectionError, openai.RateLimitError, openai.APIError) as e:
+            logger.exception("Error during fine-tuning job creation")
+            raise ValueError("Failed to create the fine-tuning job.") from e
+
+    def clear_train_buffer(self):
+        """Clear the training buffer."""
+        self.train_buffer.clear()
+
+    def clear_val_buffer(self):
+        """Clear the validation buffer."""
+        self.val_buffer.clear()
+
+    @classmethod
+    def from_agent(cls, agent: ReActAgent, **kwargs) -> Self:
+        """Creates an OpenAISFTOpt from an existing ReActAgent.
+
+        Extracts the LLM call operation (llm_call_op) from the provided ReActAgent. 
At the moment, only initialization + from ReActAgent is supported. + + Args: + agent: The ReActAgent from which to extract the LLM call operation. + **kwargs: Additional keyword arguments to pass to the OpenAISFTOpt constructor. + + Returns: + OpenAISFTOpt: An instance of the OpenAISFTOpt class initialized with the LLM call + operation from the provided ReActAgent. + """ + return cls( + llm_call_op=agent._react_module.tool_select_module.llm_call_op, + **kwargs, + ) diff --git a/ldp/alg/optimizer/opt.py b/ldp/alg/optimizer/opt.py new file mode 100644 index 00000000..afdf84d7 --- /dev/null +++ b/ldp/alg/optimizer/opt.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import logging +from abc import ABC, abstractmethod +from collections.abc import Iterable + +from ldp.data_structures import Trajectory + +logger = logging.getLogger(__name__) + + +# Registry for all optimizers +_OPTIMIZER_REGISTRY: dict[str, type[Optimizer]] = {} + + +class Optimizer(ABC): + """Base class for all optimizers.""" + + def __init_subclass__(cls) -> None: + # Register each optimizer subclass + _OPTIMIZER_REGISTRY[cls.__name__] = cls + return super().__init_subclass__() + + def aggregate(self, trajectories: Iterable[Trajectory]) -> None: + """Aggregate trajectories to construct training samples.""" + for trajectory in trajectories: + self.aggregate_trajectory(trajectory) + + @abstractmethod + def aggregate_trajectory(self, trajectory: Trajectory) -> None: + """Aggregate transitions from a single trajectory to construct training samples.""" + + @abstractmethod + async def update(self) -> None: + """Update the model based on the aggregated samples.""" + + +class ChainedOptimizer(Optimizer): + """An optimizer that runs a sequence of sub-optimizers in the order they are provided.""" + + def __init__(self, *optimizers: Optimizer): + self.optimizers = optimizers + + def aggregate(self, trajectories: Iterable[Trajectory]) -> None: + for optimizer in self.optimizers: + optimizer.aggregate(trajectories) + + async def update(self) -> None: + for optimizer in self.optimizers: + await optimizer.update() diff --git a/ldp/alg/optimizer/replay_buffers.py b/ldp/alg/optimizer/replay_buffers.py new file mode 100644 index 00000000..36f48a80 --- /dev/null +++ b/ldp/alg/optimizer/replay_buffers.py @@ -0,0 +1,34 @@ +import random +from collections import UserList + + +class CircularReplayBuffer(UserList[dict]): + def resize(self, size: int): + if len(self) > size: + self.data = self.data[-size:] + + def batched_iter( + self, batch_size: int, shuffle: bool = True, infinite: bool = False + ): + while True: + indices = list(range(len(self))) + if shuffle: + random.shuffle(indices) + + for i in range(0, len(self), batch_size): + keys = self.data[0].keys() + + batch: dict[str, list] = {k: [] for k in keys} + for j in indices[i : i + batch_size]: + if self.data[j].keys() != keys: + raise RuntimeError( + "Found buffer element with inconsistent keys" + ) + + for k in keys: + batch[k].append(self.data[j][k]) + + yield batch + + if not infinite: + break diff --git a/ldp/alg/rollout.py b/ldp/alg/rollout.py new file mode 100644 index 00000000..f3b152ee --- /dev/null +++ b/ldp/alg/rollout.py @@ -0,0 +1,309 @@ +import asyncio +import itertools +import logging +import uuid +from collections.abc import Callable, Iterator, Sequence +from contextlib import contextmanager, nullcontext +from typing import Any, TypeVar, overload + +from aviary.env import Environment +from aviary.message import Message + +from ldp.agent import Agent +from 
ldp.alg.callbacks import Callback +from ldp.data_structures import Trajectory, Transition + +logger = logging.getLogger(__name__) + + +TEnv = TypeVar("TEnv", bound=Environment) + + +class CaughtError(Exception): + """Base class for reraised exceptions when catching is enabled.""" + + def __init__(self, original_exc: Exception): + self.original_exc = original_exc + + exc_type = "undefined" + + +class AgentError(CaughtError): + exc_type = "agent" + + +class EnvError(CaughtError): + exc_type = "env" + + +@contextmanager +def reraise_exc_as(reraise: type[CaughtError], enabled: bool) -> Iterator[None]: + try: + yield + except Exception as e: + if enabled: + logging.exception(f"Caught {reraise.exc_type} exception.") + raise reraise(e) from e + raise + + +class RolloutManager: + def __init__( + self, + agent: Agent, + catch_agent_failures: bool = True, + catch_env_failures: bool = True, + callbacks: Sequence[Callback] | None = None, + concurrency_limit: int | None = None, + ): + self.agent = agent + + self.catch_agent_failures = catch_agent_failures + self.catch_env_failures = catch_env_failures + + self.concurrency_limiter = ( + asyncio.Semaphore(concurrency_limit) if concurrency_limit else nullcontext() + ) + + self.traj_buffer: dict[str, Trajectory] = {} + self.callbacks = callbacks or [] + + @overload + async def sample_trajectories( # noqa: D418 + self, + environment_factory: Callable[[], TEnv], + batch_size: int = 1, + max_steps: int | None = None, + ) -> list[tuple[Trajectory, TEnv]]: + """Run rollouts in parallel, using a factory to construct environments. + + We will construct `batch_size` environments and run rollouts on each of them. + If `max_steps` is set, rollouts will be truncated at this value. If a rollout + has fewer than `max_steps`, then a new environment will be constructed and another + rollout will be started until `max_steps` is reached. + + Args: + environment_factory: A no-argument callable that returns + an environment instance + batch_size (int, optional): Defaults to 1. + max_steps (int | None, optional): Max steps per rollout. Defaults to None (see above). + + Returns: + list[tuple[Trajectory, Environment]]: A list of (trajectory, environment) tuples: one per rollout. + """ + + @overload + async def sample_trajectories( # noqa: D418 + self, + environments: Sequence[Environment], + max_steps: int | None = None, + ) -> list[Trajectory]: + """Run rollouts in parallel on a list of provided environments. + + Args: + environments: A list of environments to run rollouts on. + max_steps: Max steps per rollout. Defaults to None, in which case the rollouts are run + until environment returns done. 
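+
+        A hedged usage sketch (assumes an `Agent` instance `agent` and a list
+        `envs` of constructed environments; the names are illustrative):
+
+        ```python
+        manager = RolloutManager(agent)
+        trajectories = await manager.sample_trajectories(
+            environments=envs, max_steps=50
+        )
+        ```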
+ """ + + async def sample_trajectories(self, **kwargs): + if "environment_factory" in kwargs: + assert ( + "environments" not in kwargs + ), "Cannot use environment_factory with environments" + + return await self._sample_trajectories_from_env_factory( + kwargs["environment_factory"], + kwargs.get("batch_size", 1), + kwargs.get("max_steps"), + ) + + if "environments" in kwargs: + assert ( + "environment_factory" not in kwargs + ), "Cannot use environments with environment_factory" + return await self._sample_trajectories_from_envs( + kwargs["environments"], kwargs.get("max_steps") + ) + + raise TypeError( + "sample_trajectories() missing required " + "arguments 'environment_factory' or 'environments'" + ) + + async def _sample_trajectories_from_env_factory( + self, + environment_factory: Callable[[], Environment], + batch_size: int = 1, + max_steps: int | None = None, + ) -> list[tuple[Trajectory, Environment]]: + self.traj_buffer.clear() + + async def rollout_with_args(idx: int, **rollout_kwargs): + return idx, await self._rollout(**rollout_kwargs), rollout_kwargs + + accumulated_steps = [0] * batch_size + # submit initial batch of tasks + tasks = [ + asyncio.create_task( + rollout_with_args( + idx, + traj_id=uuid.uuid4().hex, + env=environment_factory(), + max_steps=max_steps, + ) + ) + for idx in range(batch_size) + ] + + results = [] + while tasks: + done, pending = await asyncio.wait( + tasks, return_when=asyncio.FIRST_COMPLETED + ) + new_tasks = [] + for task in done: + idx, traj, kwargs = await task + results.append((traj, kwargs["env"])) + accumulated_steps[idx] += len(traj.steps) + if ( + max_steps is not None + and (remaining_steps := max_steps - accumulated_steps[idx]) > 0 + ): + # submit another task if we haven't reached max_steps + new_task = asyncio.create_task( + rollout_with_args( + idx, + traj_id=uuid.uuid4().hex, + env=environment_factory(), + max_steps=remaining_steps, + ) + ) + new_tasks.append(new_task) + + tasks = list(pending) + new_tasks + + return results + + async def _sample_trajectories_from_envs( + self, + environments: Sequence[Environment], + max_steps: int | None = None, + ) -> list[Trajectory]: + self.traj_buffer.clear() + + traj_ids = [uuid.uuid4().hex for _ in range(len(environments))] + await asyncio.gather( + *( + self._rollout(*args, max_steps=max_steps) + for args in zip(traj_ids, environments, strict=True) + ) + ) + return [self.traj_buffer[traj_id] for traj_id in traj_ids] + + async def _rollout( + self, + traj_id: str, + env: Environment, + max_steps: int | None, + ) -> Trajectory: + trajectory = Trajectory(traj_id=traj_id) + + async def store_step(step: Transition): + await asyncio.gather(*[ + callback.after_transition(traj_id, self.agent, env, step) + for callback in self.callbacks + ]) + trajectory.steps.append(step) + + # Set default values to store in the buffer in case reset/init_state fail + obs: list[Message] = [] + agent_state: Any = None + + try: + with reraise_exc_as(EnvError, enabled=self.catch_env_failures): + obs, tools = await env.reset() + + with reraise_exc_as(AgentError, enabled=self.catch_agent_failures): + agent_state = await self.agent.init_state(tools) + + for timestep in itertools.count(): + step = await self._take_step(timestep, traj_id, env, agent_state, obs) + + # We assume the below won't throw a CaughtError + await store_step(step) + + # set things up for the next iteration + agent_state = step.next_agent_state + obs = step.next_observation + + if step.done: + break + + if timestep + 1 == max_steps: + 
trajectory.steps[-1].truncated = True + break + + except CaughtError as e: + # NOTE: This trajectory should not be used for regular training. + # We save the last transition here for debugging, etc. + await store_step( + Transition( + timestep=len(trajectory.steps), + agent_state=agent_state, + next_agent_state=None, + observation=obs, + next_observation=[], + action=None, + done=True, + metadata={"exception": repr(e.original_exc)}, + ) + ) + + self.traj_buffer[traj_id] = trajectory + return trajectory + + async def _take_step( + self, + timestep: int, + traj_id: str, + env: Environment, + agent_state: Any, + obs: list[Message], + ) -> Transition: + async with self.concurrency_limiter: # type: ignore[attr-defined] + await asyncio.gather(*[ + callback.before_transition(traj_id, self.agent, env, agent_state, obs) + for callback in self.callbacks + ]) + + with reraise_exc_as(AgentError, enabled=self.catch_agent_failures): + ( + action, + next_agent_state, + value, + ) = await self.agent.get_asv(agent_state, obs) + await asyncio.gather(*[ + callback.after_agent_get_asv(traj_id, action, next_agent_state, value) + for callback in self.callbacks + ]) + + with reraise_exc_as(EnvError, enabled=self.catch_env_failures): + next_obs, reward, done, trunc = await env.step(action.value) + await asyncio.gather(*[ + callback.after_env_step(traj_id, next_obs, reward, done, trunc) + for callback in self.callbacks + ]) + + return Transition( + timestep=timestep, + agent_state=agent_state, + next_agent_state=next_agent_state, + action=action, + reward=reward, + value=value, + observation=obs, + next_observation=next_obs, + done=done, + truncated=trunc, + ) diff --git a/ldp/alg/runners.py b/ldp/alg/runners.py new file mode 100644 index 00000000..05791773 --- /dev/null +++ b/ldp/alg/runners.py @@ -0,0 +1,313 @@ +from __future__ import annotations + +import asyncio +import math +import random +from collections.abc import Sequence +from typing import cast + +from aviary.env import Environment, TaskDataset +from pydantic import BaseModel, ConfigDict, Field +from tqdm import tqdm, trange + +from ldp.agent import Agent +from ldp.alg.callbacks import Callback +from ldp.alg.optimizer import Optimizer +from ldp.alg.rollout import RolloutManager +from ldp.data_structures import Trajectory +from ldp.graph.op_utils import eval_mode, train_mode +from ldp.graph.ops import OpResult + + +async def _run_eval_loop( + dataset: TaskDataset, + rollout_manager: RolloutManager, + batch_size: int, + num_iterations: int | None, + max_rollout_steps: int | None, + callbacks: Sequence[Callback], + shuffle: bool = False, +) -> None: + await asyncio.gather(*[callback.before_eval_loop() for callback in callbacks]) + + if num_iterations is None: + try: + num_iterations = math.ceil(len(dataset) / batch_size) + except TypeError: + raise ValueError( + "If num_iterations is not provided, the " + "dataset must be finite and implement __len__." + ) from None + + for i_iter, envs in tqdm( + enumerate(dataset.iter_batches(batch_size, shuffle=shuffle)), + desc="Evaluation Iterations", + ncols=0, + leave=False, + total=num_iterations, + ): + trajectories = await rollout_manager.sample_trajectories( + environments=envs, max_steps=max_rollout_steps + ) + + # Close the environment after we have sampled from it, + # in case it needs to tear down resources. 
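+        # (Illustrative note: each eval iteration draws a fresh env batch from
+        # dataset.iter_batches, so these envs are single-use and safe to close.)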
+ await asyncio.gather(*(env.close() for env in envs)) + + await asyncio.gather(*[ + callback.after_eval_step(trajectories) for callback in callbacks + ]) + + if i_iter == num_iterations - 1: + break + + await asyncio.gather(*[callback.after_eval_loop() for callback in callbacks]) + + +class EvaluatorConfig(BaseModel): + model_config = ConfigDict(extra="forbid") + + batch_size: int = 1 + num_eval_iterations: int | None = Field( + None, + description="Number of eval iterations. " + "If not provided, will exhaust the dataset. " + "If 0, will not run the eval loop. ", + ) + max_rollout_steps: int | None = None + catch_agent_failures: bool = True + catch_env_failures: bool = True + + def make_rollout_manager( + self, agent: Agent, callbacks: Sequence[Callback] + ) -> RolloutManager: + return RolloutManager( + agent=agent, + callbacks=callbacks, + catch_agent_failures=self.catch_agent_failures, + catch_env_failures=self.catch_env_failures, + ) + + +class Evaluator: + def __init__( + self, + config: EvaluatorConfig, + agent: Agent, + dataset: TaskDataset, + callbacks: Sequence[Callback] | None = None, + ): + self.config = config + self.agent = agent + self.dataset = dataset + self.callbacks = callbacks or [] + self.rollout_manager = self.config.make_rollout_manager(agent, self.callbacks) + + @eval_mode() + async def evaluate(self, **kwargs) -> None: + """Run the agent over the provided dataset in eval mode.""" + return await self.run(**kwargs) + + async def run(self, **kwargs) -> None: + """Run the agent over the provided dataset. + + This method does not set training mode, so it can be used to collect + trajectories for offline training. + """ + await _run_eval_loop( + dataset=self.dataset, + rollout_manager=self.rollout_manager, + batch_size=self.config.batch_size, + num_iterations=self.config.num_eval_iterations, + max_rollout_steps=self.config.max_rollout_steps, + callbacks=self.callbacks, + **kwargs, + ) + + +class OnlineTrainerConfig(EvaluatorConfig): + batch_size: int + num_train_iterations: int + num_rollouts_per_env: int = Field( + 1, + description="Number of rollouts to execute for each " + "environment per training iteration.", + ) + update_every: int = Field( + 1, + description="Number of training iterations to run before updating the model.", + ) + eval_every: int | None = Field( + None, + description=( + "If set, will repeatedly evaluate on the validation set after this many" + " iterations. If unset (default), no evaluation is performed." 
+ ), + ) + eval_before: bool = Field( + True, # noqa: FBT003 + description="If True (default), run an evaluation loop before training.", + ) + + +class OnlineTrainer: + def __init__( + self, + config: OnlineTrainerConfig, + agent: Agent, + optimizer: Optimizer, + train_dataset: TaskDataset, + eval_dataset: TaskDataset | None = None, + callbacks: Sequence[Callback] | None = None, + ): + if config.eval_every is not None and eval_dataset is None: + raise ValueError("Must specify eval_dataset if eval_every is set") + + self.config = config + self.agent = agent + self.train_dataset = train_dataset + self.eval_dataset = eval_dataset + self.optimizer = optimizer + self.callbacks = callbacks or [] + self.rollout_manager = self.config.make_rollout_manager( + agent=agent, callbacks=self.callbacks + ) + + async def train(self) -> None: + if self.config.eval_before: + await self._eval_loop() + + pbar = tqdm( + desc="Training Iterations", ncols=0, total=self.config.num_train_iterations + ) + + while pbar.n < self.config.num_train_iterations: + for batch in self.train_dataset.iter_batches( + self.config.batch_size, shuffle=True + ): + await self._training_step(pbar.n, batch) + pbar.update() + + if ( + self.config.eval_every is not None + and pbar.n % self.config.eval_every == 0 + ): + await self._eval_loop() + + if pbar.n == self.config.num_train_iterations: + break + + pbar.close() + + await self._eval_loop() + + @eval_mode() + async def _eval_loop(self, **kwargs) -> None: + if self.config.num_eval_iterations == 0: + return + + await _run_eval_loop( + dataset=cast(TaskDataset, self.eval_dataset), + rollout_manager=self.rollout_manager, + batch_size=self.config.batch_size, + num_iterations=self.config.num_eval_iterations, + max_rollout_steps=self.config.max_rollout_steps, + callbacks=self.callbacks, + **kwargs, + ) + + @train_mode() + async def _training_step( + self, training_step: int, envs: Sequence[Environment] + ) -> None: + training_batch: list[Trajectory] = [] + + for _ in range(self.config.num_rollouts_per_env): + trajectories = await self.rollout_manager.sample_trajectories( + environments=envs, max_steps=self.config.max_rollout_steps + ) + + # Close the environments after we have sampled from them, in case they need to tear down resources. 
+ await asyncio.gather(*[env.close() for env in envs]) + + training_batch.extend(traj for traj in trajectories if not traj.failed) + + await self._optimizer_step(training_step, training_batch) + + await asyncio.gather(*[ + callback.after_train_step(trajectories) for callback in self.callbacks + ]) + + async def _optimizer_step( + self, training_step: int, training_batch: Sequence[Trajectory] + ) -> None: + for traj in training_batch: + for step in traj.steps: + # TODO: make this async + # step.action is not None because we checked traj.failed above + cast(OpResult, step.action).compute_grads() + + self.optimizer.aggregate(training_batch) + + if (training_step + 1) % self.config.update_every == 0: + await self.optimizer.update() + + await asyncio.gather(*[ + callback.after_update() for callback in self.callbacks + ]) + + +class OfflineTrainerConfig(BaseModel): + model_config = ConfigDict(extra="forbid") + + batch_size: int + update_every: int = Field( + 1, + description="Number of training iterations to run before updating the model.", + ) + # TODO: add some concept of eval loops + + +class OfflineTrainer: + def __init__( + self, + config: OfflineTrainerConfig, + agent: Agent, + optimizer: Optimizer, + train_trajectories: list[Trajectory], + callbacks: Sequence[Callback] | None = None, + ): + self.config = config + self.agent = agent + self.optimizer = optimizer + # copy so we can shuffle + self.train_trajectories = train_trajectories.copy() + self.callbacks = callbacks or [] + + async def train(self) -> None: + random.shuffle(self.train_trajectories) + + for training_step, i_batch_start in enumerate( + trange( + 0, + len(self.train_trajectories), + self.config.batch_size, + desc="Training iterations", + ncols=0, + ) + ): + batch = self.train_trajectories[ + i_batch_start : i_batch_start + self.config.batch_size + ] + + self.optimizer.aggregate(batch) + + if (training_step + 1) % self.config.update_every == 0: + await self.optimizer.update() + await asyncio.gather(*[ + callback.after_update() for callback in self.callbacks + ]) + + await asyncio.gather(*[ + callback.after_train_step(batch) for callback in self.callbacks + ]) diff --git a/ldp/alg/tree_search.py b/ldp/alg/tree_search.py new file mode 100644 index 00000000..1710c130 --- /dev/null +++ b/ldp/alg/tree_search.py @@ -0,0 +1,149 @@ +import asyncio +import itertools +import logging +import uuid +from collections.abc import Awaitable, Callable, Sequence +from typing import Any, cast + +from aviary.message import Message +from aviary.utils import is_coroutine_callable + +from ldp.agent import Agent +from ldp.alg.callbacks import Callback +from ldp.alg.rollout import ( + AgentError, + CaughtError, + EnvError, + RolloutManager, + TEnv, + reraise_exc_as, +) +from ldp.data_structures import Trajectory + +logger = logging.getLogger(__name__) + + +class TreeSearchRollout(RolloutManager): + def __init__( + self, + agent: Agent, + branching_factor: int, + env_clone_fn: Callable[[TEnv], Awaitable[TEnv]] | Callable[[TEnv], TEnv], + catch_agent_failures: bool = True, + catch_env_failures: bool = True, + callbacks: Sequence[Callback] | None = None, + concurrency_limit: int | None = None, + target_reward: float | None = None, + ): + super().__init__( + agent, + catch_agent_failures=catch_agent_failures, + catch_env_failures=catch_env_failures, + callbacks=callbacks, + concurrency_limit=concurrency_limit, + ) + + self.branching_factor = branching_factor + self.target_reward = ( + target_reward if target_reward is not None else float("inf") + ) + 
self.target_reward_hit: set[str] = set()
+
+        self.env_clone_fn = env_clone_fn
+
+    async def sample_trees(
+        self,
+        environments: Sequence[TEnv],
+        max_depth: int | None = None,
+    ) -> list[list[Trajectory]]:
+        return await asyncio.gather(*[
+            self.sample_tree(env, max_depth) for env in environments
+        ])
+
+    async def sample_tree(self, env: TEnv, max_depth: int | None) -> list[Trajectory]:
+        max_depth_f = max_depth if max_depth is not None else float("inf")
+
+        try:
+            with reraise_exc_as(EnvError, enabled=self.catch_env_failures):
+                obs, tools = await env.reset()
+
+            with reraise_exc_as(AgentError, enabled=self.catch_agent_failures):
+                agent_state = await self.agent.init_state(tools)
+
+            root_traj = Trajectory(traj_id=str(uuid.uuid4()))
+            return await self._descend(root_traj, env, agent_state, obs, max_depth_f)
+
+        except CaughtError:
+            return []
+
+    async def _descend(
+        self,
+        branch: Trajectory,
+        env: TEnv,
+        agent_state: Any,
+        obs: list[Message],
+        max_depth: float,
+    ) -> list[Trajectory]:
+        # Descend one level in the tree by adding branching_factor children to the branch.
+        # Then, recurse on each child.
+        root_traj_id = cast(str, branch.traj_id).split(":")[0]
+        if root_traj_id in self.target_reward_hit:
+            return [branch]
+
+        timestep = len(branch.steps)
+
+        async def inner_descend(idx: int) -> list[Trajectory]:
+            if is_coroutine_callable(self.env_clone_fn):
+                cloned_env = await self.env_clone_fn(env)  # type: ignore[arg-type, misc]
+            else:
+                cloned_env = self.env_clone_fn(env)  # type: ignore[arg-type]
+
+            # Descend one step
+            traj_id = f"{branch.traj_id}:{idx}"
+            try:
+                step = await self._take_step(
+                    timestep, traj_id, cloned_env, agent_state, obs
+                )
+            except CaughtError:
+                # If we failed, do not extend the branch - just return an empty list
+                return []
+
+            await asyncio.gather(*[
+                callback.after_transition(traj_id, self.agent, cloned_env, step)
+                for callback in self.callbacks
+            ])
+
+            # The original branch plus one step
+            extended_branch = Trajectory(
+                traj_id=traj_id, steps=[*branch.steps, step]
+            )
+
+            if (
+                step.done  # Trajectory is over
+                or len(extended_branch.steps) >= max_depth  # Hit max depth
+            ):
+                return [extended_branch]
+
+            if (
+                sum(step_.reward for step_ in extended_branch.steps)
+                >= self.target_reward
+            ):
+                # Signal other descents to stop too
+                self.target_reward_hit.add(root_traj_id)
+                return [extended_branch]
+
+            # Recurse
+            return await self._descend(
+                extended_branch,
+                cloned_env,
+                step.next_agent_state,
+                step.next_observation,
+                max_depth,
+            )
+
+        # Add branching_factor children
+        branches = await asyncio.gather(*[
+            inner_descend(idx) for idx in range(self.branching_factor)
+        ])
+
+        return list(itertools.chain.from_iterable(branches))
diff --git a/ldp/data_structures.py b/ldp/data_structures.py
new file mode 100644
index 00000000..4d6b43d5
--- /dev/null
+++ b/ldp/data_structures.py
@@ -0,0 +1,112 @@
+from __future__ import annotations
+
+import json
+import logging
+import os
+from typing import Any, ClassVar, Self
+
+from aviary.message import Message
+from aviary.tools import ToolRequestMessage, ToolResponseMessage
+from pydantic import BaseModel, ConfigDict, Field, JsonValue
+
+from ldp.alg.algorithms import discounted_returns
+from ldp.graph.ops import OpResult
+
+logger = logging.getLogger(__name__)
+
+
+class Transition(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")
+
+    # Sentinel value for missing observation, as opposed to empty observation
+    # Only used for tests; a user 
should never use this. + NO_OBSERVATION: ClassVar[list[Message]] = [] + + timestep: int = Field(description="Zero-indexed MDP timestep t.") + + agent_state: Any = Field( + description=( + "Agent.get_asv's input. This is `s_t` in RL terms. Note that `s_0` comes" + " from `Agent.init_state()`" + ) + ) + next_agent_state: Any = Field( + description="Agent.get_asv's output. This is s_t+1 in RL terms." + ) + + observation: list[ToolResponseMessage | Message] = Field( + description="Agent.get_asv's input. This is o_t in RL terms." + ) + next_observation: list[ToolResponseMessage | Message] = Field( + description="Environment.step output. This is o_t+1 in RL terms." + ) + + # Non-ToolRequestMessage OpResult is here to allow for testing hacks + action: OpResult[ToolRequestMessage] | OpResult | None = Field( + default=None, description="Agent.get_asv output. This is a_t in RL terms." + ) + + reward: float = Field( + default=0.0, description="Environment.step output. This is r_t in RL terms." + ) + + truncated: bool = Field( + default=False, description="timestep t's Environment.step output." + ) + done: bool = Field( + default=False, description="timestep t's Environment.step output." + ) + value: float = Field( + default=0.0, + description=( + "Value estimate output from timestep t's Agent.get_asv. This is v(s_t)" + " [state value function] or q(s_t, a_t) [state-action value]." + ), + ) + # JsonValue so we can serialize + metadata: dict[str, JsonValue] = Field(default_factory=dict) + + @property + def failed(self) -> bool: + """Get if an exception was encountered during rollout, for convenience. + + If True, this transition should not be trained on. + Failed transitions are for debugging purposes. + """ + return bool(self.metadata.get("exception")) + + +class Trajectory(BaseModel): + traj_id: str | None = None + steps: list[Transition] = Field(default_factory=list) + + @property + def failed(self) -> bool: + return any(step.failed for step in self.steps) + + @property + def done(self) -> bool: + if not self.steps: + return False + return self.steps[-1].done + + def to_jsonl(self, filename: str | os.PathLike) -> None: + with open(filename, "w") as f: + f.write(json.dumps(self.traj_id) + "\n") + f.writelines(s.model_dump_json() + "\n" for s in self.steps) + + @classmethod + def from_jsonl(cls, filename: str | os.PathLike) -> Self: + with open(filename) as f: + reader = iter(f) + traj = cls(traj_id=json.loads(next(reader))) + for json_line in reader: + traj.steps.append(Transition(**json.loads(json_line))) + return traj + + def compute_discounted_returns(self, discount: float = 1.0) -> list[float]: + return discounted_returns( + rewards=[step.reward for step in self.steps], + terminated=[step.truncated for step in self.steps], + discount=discount, + ) diff --git a/ldp/graph/__init__.py b/ldp/graph/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldp/graph/async_torch.py b/ldp/graph/async_torch.py new file mode 100644 index 00000000..e81aea41 --- /dev/null +++ b/ldp/graph/async_torch.py @@ -0,0 +1,222 @@ +__all__ = ["AsyncTorchModule", "async_protect_torch_call"] + +import asyncio +import operator +import time +from collections.abc import Callable +from contextlib import nullcontext +from typing import Any +from uuid import UUID, uuid4 + +import torch +from torch import nn +from torch.nn.functional import pad +from torch.utils.data import default_collate +from transformers.generation.utils import GenerateDecoderOnlyOutput + +_TORCH_LOCK = asyncio.Lock() + + +def 
_get_autocast_context(dtype: torch.dtype | None, device_type): + return ( + nullcontext() + if dtype is None + else torch.autocast(dtype=dtype, device_type=device_type) + ) + + +def _get_grad_context(no_grad: bool): + return torch.no_grad() if no_grad else nullcontext() + + +def async_protect_torch_call( + module: nn.Module, + module_call_fn: Callable = lambda m, *args, **kwargs: m(*args, **kwargs), + no_grad: bool = False, + autocast_dtype: torch.dtype | None = None, + autocast_device_type=None, +) -> Callable: + async def wrapped_call(*args, **kwargs): + async with _TORCH_LOCK: + with ( + _get_grad_context(no_grad), + _get_autocast_context(autocast_dtype, autocast_device_type), + ): + return module_call_fn(module, *args, **kwargs) + + return wrapped_call + + +# TODO: make max_wait_interval adaptive. We can use a heuristic like +# half the average time for a single call. If it's not provided, enable +# adaptive mode. + + +class AsyncTorchModule: + def __init__( + self, + module: nn.Module, + batch_size: int, + max_wait_interval: float, + collate_fn: Callable = default_collate, + decollate_fn: Callable = list, + module_call_fn: Callable = lambda m, *args, **kwargs: m(*args, **kwargs), + ): + """A wrapper around a torch.nn.Module that allows for async calls. + + Usage: + ```python + my_model = nn.Linear(2, 2) + async_model = AsyncTorchModule(my_model, batch_size=4, max_wait_interval=0.01) + + result = await asyncio.gather(*[ + async_model(input=torch.rand(2)) for _ in range(10) + ]) + ``` + In the above example, note that we are making 10 calls with a batch size of 4. + The first two groups of 4 will be batched and executed as they arrive. The last 2 + will wait for max_wait_interval and then execute. + + NOTE: This module is not thread-safe and currently always operates in no_grad() mode. + It may be possible to relax the latter constraint. + + Args: + module: The PyTorch module to wrap. + batch_size: The target batch size to use when calling the module. As soon as + batch_size calls are made, a forward pass is executed. + max_wait_interval: The maximum time to wait for a batch to fill up before + executing the calls we have buffered. + collate_fn: A PyTorch collate function to use when batching inputs. Defaults to + the PyTorch default_collate. + decollate_fn: Kind of like the opposite of collate_fn. This function should take + the batched output and return an ordered list of outputs. Defaults to list. + module_call_fn: Function that allows for customizing the call to the module. 
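+                For example (illustrative), `lambda m, **kw: m.generate(**kw)` would
+                route each batched call through a huggingface `generate` method.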
+ """ + self.module = module + self.batch_size = batch_size + self.timeout = max_wait_interval + self.collate_fn = collate_fn + self.decollate_fn = decollate_fn + self.module_call_fn = module_call_fn + + self._work_buffer: list[tuple[float, UUID, dict[str, Any]]] = [] + self._result_buffer: dict[UUID, Any] = {} + self._lock = asyncio.Lock() + + def _get_dtype_and_device(self) -> tuple[torch.dtype, torch.device]: + param = next(self.module.parameters()) + return param.dtype, param.device + + async def __call__(self, **kwargs): + request_id = uuid4() + request_ts = time.time() + + async with self._lock: + # Make sure only one coroutine is using the work buffer at a time + self._work_buffer.append((request_ts, request_id, kwargs)) + + while True: + async with self._lock: + # Only one coroutine allowed in here when: + # - modifying the result buffer + # - modifying the work buffer + # - calling the module (handled by _TORCH_LOCK) + + if request_id in self._result_buffer: + # Our request was fulfilled by this or another coroutine! + return self._result_buffer.pop(request_id) + + # Try to run a batch + await self._batched_call() + + # Sleep, to let another coroutine take over if it needs to + await asyncio.sleep(0.0) + + async def _batched_call(self): + now = time.time() + + # sort by oldest requests first + self._work_buffer.sort(key=operator.itemgetter(0)) + + if ( + len(self._work_buffer) >= self.batch_size + or now - self._work_buffer[0][0] > self.timeout + ): + # if we're over batch size or have at least one input waiting for + # more than timeout, pull out a batch to run + batch = self._work_buffer[: self.batch_size] + self._work_buffer = self._work_buffer[self.batch_size :] + + # Construct the batch tensors + sample_kwargs = [x[2] for x in batch] + batch_kwargs = self.collate_fn(sample_kwargs) + + # Wrap the forward call to be async-safe using the options we want + dtype, device = self._get_dtype_and_device() + protected_call = async_protect_torch_call( + self.module, + module_call_fn=self.module_call_fn, + no_grad=True, + autocast_dtype=dtype, + autocast_device_type=device.type, + ) + + # Call the module and store results + batched_results = await protected_call( + **batch_kwargs, + ) + request_ids = [x[1] for x in batch] + results = self.decollate_fn(batched_results) + self._result_buffer.update(zip(request_ids, results, strict=True)) + + @staticmethod + def collate_fn_transformers_model( + samples: list[dict[str, torch.Tensor]], agg_keys: set[str] | None = None + ) -> dict[str, torch.Tensor]: + """Collates and pads a batch of samples for input into a huggingface transformer model.""" + if agg_keys is None: + agg_keys = {"input_ids", "attention_mask"} + seq_lens = [inp["input_ids"].shape[1] for inp in samples] + max_seq_len = max(seq_lens) + n_pads = [max_seq_len - seq_len for seq_len in seq_lens] + + batch = { + key: torch.cat( + [ + pad(inp[key], (0, n_pad), value=0) + for inp, n_pad in zip(samples, n_pads, strict=True) + ], + dim=0, + ) + for key in agg_keys + } + + # Treating other keys as constant kwargs params for the model + other_keys = set(samples[0].keys()) - agg_keys + for key in other_keys: + for sample in samples: + if key not in sample: + raise ValueError(f"Missing key {key} in sample.") + if key in batch and batch[key] != sample[key]: + raise ValueError( + f"Constant kwarg key {key} has different values within batch." 
+ ) + batch[key] = sample[key] + + return batch + + @staticmethod + def decollate_fn_transformers_decoder( + batched_output: GenerateDecoderOnlyOutput, + ) -> list[GenerateDecoderOnlyOutput]: + """Decollates a batched output from a huggingface transformer decoder.""" + batch_size = batched_output.sequences.size(0) + + return [ + GenerateDecoderOnlyOutput({ + "sequences": batched_output.sequences[i][None], + "scores": [v[i][None] for v in batched_output.scores], + # Ignore other keys for now + }) + for i in range(batch_size) + ] diff --git a/ldp/graph/common_ops.py b/ldp/graph/common_ops.py new file mode 100644 index 00000000..e432e4b4 --- /dev/null +++ b/ldp/graph/common_ops.py @@ -0,0 +1,370 @@ +"""This module contains commonly-used Op implementations.""" + +from __future__ import annotations + +import asyncio +import functools +import inspect +import logging +from collections.abc import Awaitable, Callable +from functools import lru_cache +from typing import Generic, TypeVar, cast, overload + +import numpy as np +import tree +from aviary.message import Message +from aviary.tools import Tool, ToolRequestMessage +from aviary.utils import is_coroutine_callable +from pydantic import BaseModel + +from ldp.graph.gradient_estimators import assign_constant_grads +from ldp.graph.memory import Memory, MemoryModel, UIndexMemoryModel +from ldp.graph.op_utils import CallID, get_call_id, get_training_mode +from ldp.graph.ops import GradInType, Op, OpCtx, ResultOrValue, TOutput +from ldp.llms import EmbeddingModel, LLMModel, LLMResult + +logger = logging.getLogger(__name__) + + +def logsumexp(a: np.ndarray | list[float]) -> float: + a_max = np.max(a) + return a_max + np.log(np.sum(np.exp(a - a_max))) + + +class IdentityOp(Op[TOutput]): + """ + An operation that simply returns the input value. + + NOTE: this op is equivalent to FxnOp(lambda x: x). 
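+
+    A minimal sketch of a bare call (a bare op call builds a single-node compute
+    graph):
+    ```python
+    op = IdentityOp[int]()
+    result = await op(value=42)  # an OpResult with result.value == 42
+    ```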
+ """ + + async def forward(self, value: TOutput) -> TOutput: + # We assume value already has the correct run_id from its producer + return value + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + return [], {"value": grad_output} + + +class StopGradOp(IdentityOp[TOutput]): + """Pass through Op that terminates gradients in the backward pass.""" + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + return assign_constant_grads(input_args, input_kwargs, None) + + +TConfig = TypeVar("TConfig", bound=BaseModel | dict) + + +class ConfigOp(Op[TConfig], Generic[TConfig]): + """An operation that contains a configuration object.""" + + def __init__(self, config: TConfig): + self.config = config + + async def forward(self) -> TConfig: + return self.config + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + # Check that the grad_output structure is consistent with our config + tree.assert_same_structure( + grad_output, ctx.get(call_id, "output").value, check_types=False + ) + + # Terminate here - we're a leaf since a ConfigOp takes no inputs + return [], {} + + +TResult = TypeVar("TResult") + + +class Cacheable(Generic[TResult]): + def __init__(self, co: Awaitable[TResult]) -> None: + self.co = co + self.done = False + self.result: TResult | None = None + self.lock = asyncio.Lock() + + async def get_result(self) -> TResult | None: + async with self.lock: + if not self.done: + self.result = await self.co + self.done = True + return self.result + + def __await__(self): + return self.get_result().__await__() + + +def async_cache(func): + @functools.lru_cache(maxsize=1024) + @functools.wraps(func) + def wrapper(*args, **kwargs): + return Cacheable(co=func(*args, **kwargs)) + + return wrapper + + +class FxnOp(Op[TOutput]): + """ + Wrap a function for a straight through gradient approximation for all args/kwargs. + + Basically, consider the fxn as a transform upon the inputs during the forward pass, + and propagating the same gradient for all inputs during the backward pass. 
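+
+    For instance (an illustrative sketch):
+    ```python
+    concat_op = FxnOp[str](lambda a, b: a + b, fxn_name="concat")
+    result = await concat_op("foo", "bar")  # result.value == "foobar"
+    ```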
+    """
+
+    def __init__(
+        self,
+        fxn: Callable[..., TOutput] | Callable[..., Awaitable[TOutput]],
+        cache: bool = False,
+        fxn_name: str | None = None,  # useful for lambdas
+    ):
+        if cache:
+            self.fxn = (
+                async_cache(fxn) if is_coroutine_callable(fxn) else lru_cache()(fxn)
+            )
+        else:
+            self.fxn = fxn
+
+        try:
+            self.fxn_name = fxn_name or fxn.__name__
+        except AttributeError:  # unittest.mock.Mock or lambda
+            self.fxn_name = str(fxn)
+
+        # override forward args with the signature of the function
+        fwd_sig = inspect.signature(self.fxn)
+        self._fwd_args = list(fwd_sig.parameters.values())
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__} {self.fxn_name} ({id(self)})"
+
+    async def forward(self, *args, **kwargs) -> TOutput:
+        if is_coroutine_callable(self.fxn):
+            return await self.fxn(*args, **kwargs)
+        return self.fxn(*args, **kwargs)
+
+    @classmethod
+    def backward(
+        cls,
+        ctx: OpCtx,
+        input_args: list[ResultOrValue],
+        input_kwargs: dict[str, ResultOrValue],
+        grad_output: tree.Structure,
+        call_id: CallID,
+    ) -> GradInType:
+        return assign_constant_grads(input_args, input_kwargs, 0.0)
+
+
+class PromptOp(FxnOp[str]):
+    """An operation that formats kwargs into a prompt string."""
+
+    async def _fxn(
+        self, prompt_kwargs: dict[str, str] | None = None, **kwargs: str
+    ) -> str:
+        return self.prompt.format(**{**(prompt_kwargs or {}), **kwargs})
+
+    def __init__(self, prompt: str):
+        self.prompt = prompt
+        super().__init__(fxn=self._fxn, cache=False)
+
+    def __repr__(self) -> str:
+        # we want to use Op.__repr__, not FxnOp.__repr__
+        return super(FxnOp, self).__repr__()
+
+
+class LLMCallOp(Op[Message]):
+    """An operation that makes an LLM call."""
+
+    def __init__(self, num_samples_logprob_estimate: int = 0) -> None:
+        super().__init__()
+        # Trainable is metadata indicating that an optimizer can use this Op.
+        # It enables things like (remote) fine-tuning with OpenAI.
+        self.trainable: bool = False
+        self.num_samples_partition_estimate = num_samples_logprob_estimate
+
+    @overload
+    async def forward(
+        self,
+        config: dict,
+        msgs: list[Message],
+        tools: list[Tool] = ...,
+        tool_choice: Tool | str | None = LLMModel.TOOL_CHOICE_REQUIRED,
+    ) -> ToolRequestMessage: ...
+
+    @overload
+    async def forward(
+        self,
+        config: dict,
+        msgs: list[Message],
+        tools: None = None,
+        tool_choice: str | None = LLMModel.TOOL_CHOICE_REQUIRED,
+    ) -> Message: ...
+
+    async def forward(
+        self,
+        config: dict,
+        msgs: list[Message],
+        tools: list[Tool] | None = None,
+        tool_choice: Tool | str | None = LLMModel.TOOL_CHOICE_REQUIRED,
+    ) -> Message:
+        model = LLMModel(config=config)
+
+        result = await model.call(messages=msgs, tools=tools, tool_choice=tool_choice)
+        if result.messages is None:
+            raise ValueError("No messages returned")
+
+        # if not set, assume temp = 1. TODO: when would it not be set?
+        temperature: float = (result.config or {}).get("temperature", 1.0)
+
+        # Compute a Monte Carlo estimate of the logprob of this sequence at the given temperature.
+        logprob = await self.compute_logprob(
+            raw_log_p=result.logprob,
+            temperature=temperature,
+            model=model,
+            messages=msgs,
+            tools=tools,
+            tool_choice=tool_choice,
+        )
+
+        call_id = get_call_id()
+        self.ctx.update(call_id, "result", result)
+        # This is the logprob of this sequence according to the raw model, without
+        # any temperature/top-p distribution shaping.
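+        # (The "logprob" stored below is the temperature-adjusted Monte Carlo estimate.)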
+        self.ctx.update(call_id, "raw_logprob", result.logprob)
+
+        self.ctx.update(call_id, "temperature", temperature)
+        self.ctx.update(call_id, "logprob", logprob)
+
+        return result.messages[0]
+
+    async def compute_logprob(
+        self,
+        raw_log_p: float | None,
+        temperature: float,
+        model: LLMModel,
+        **model_kwargs,
+    ) -> float | None:
+        """This method computes a Monte Carlo estimate of logprob for a given temperature.
+
+        It takes as input the logprob at T=1. The derivation is in Section 5.1 of the Aviary notes.
+        """
+        if raw_log_p is None or self.num_samples_partition_estimate == 0:
+            return None
+
+        if temperature == 1:
+            return raw_log_p
+
+        if temperature == 0:
+            # Greedy (deterministic) sampling: the sequence has probability 1,
+            # so its logprob is 0
+            return 0.0
+
+        # TODO: Try using n completions from a single API call. Need to modify LLMModel.call to do this, since
+        # it currently only checks completion.choices[0]. Would reduce cost for long prompts.
+        # TODO: think about whether sampling params besides temperature need to be accounted for, like top_p
+        results = await asyncio.gather(*[
+            model.call(temperature=1, **model_kwargs)
+            for _ in range(self.num_samples_partition_estimate)
+        ])
+        temp_factor = 1.0 / temperature - 1.0
+
+        # Partition function estimate:
+        # Z_T = E_P[ e^(lnP/T - lnP) ]
+        log_Z_T = logsumexp([
+            temp_factor * cast(float, result.logprob) for result in results
+        ]) - np.log(self.num_samples_partition_estimate)
+
+        return (raw_log_p / temperature) - log_Z_T
+
+    @classmethod
+    def backward(
+        cls,
+        ctx: OpCtx,
+        input_args: list[ResultOrValue],
+        input_kwargs: dict[str, ResultOrValue],
+        grad_output: tree.Structure,
+        call_id: CallID,
+    ) -> GradInType:
+        # By default, we want to descend into config, but not msgs/tools/tool_choice.
+        # Essentially: we can think of each config field as an independent parameter,
+        # but not necessarily each message or tool.
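+        # For example, with a purely hypothetical config of
+        # {"model": "gpt-4o", "temperature": 0.7}, the map_structure call below
+        # produces grad_config = {"model": 0.0, "temperature": 0.0}.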
+ + # tree.map_structure allows us to assign a gradient of 0 to all fields of config + grad_config = tree.map_structure(lambda _: 0.0, input_kwargs["config"]) + grad_kwargs = {"config": grad_config} + if "msgs" in input_kwargs: + grad_kwargs["msgs"] = 0.0 + if "tools" in input_kwargs: + grad_kwargs["tools"] = 0.0 + + return [], grad_kwargs + + def get_examples(self) -> list[tuple[LLMResult, float]]: + examples = [ + ( + self.ctx.get(c, "result", None), + # get 'model' kwarg from grad_input + # use default of None if not found + self.ctx.get(c, "grad_input", default=([], {}))[1].get("model", None), + ) + for c in self.get_call_ids() + ] + # filter out the None values + return [(e, w) for e, w in examples if e is not None] + + +class MemoryOp(Op[list[Memory]]): + """An operation for managing memory retrieval and storage.""" + + def __init__(self, memory_model: MemoryModel | None = None): + super().__init__() + self.memory_model = memory_model or UIndexMemoryModel( + embedding_model=EmbeddingModel.from_name("sparse") + ) + + async def forward( + self, + query: str, + input: str | None = None, # noqa: A002 + matches: int = 3, + ) -> list[Memory]: + """Retrieve relevant memories based on a query.""" + if get_training_mode(): + call_id = get_call_id() + self.ctx.update(call_id, "query", query) + self.ctx.update(call_id, "memory_input", input) + return await self.memory_model.get_memory(query, matches) + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + """Backward pass for memory retrieval - goes back to item.""" + return assign_constant_grads(input_args, input_kwargs, 0.0) diff --git a/ldp/graph/gradient_estimators.py b/ldp/graph/gradient_estimators.py new file mode 100644 index 00000000..6041a36f --- /dev/null +++ b/ldp/graph/gradient_estimators.py @@ -0,0 +1,144 @@ +"""This module defines various gradient estimators that can be patched in during backward passes.""" + +from __future__ import annotations + +import logging +from functools import partial +from typing import Any + +import tree + +from ldp.graph.op_utils import CallID +from ldp.graph.ops import GradInType, OpCtx, OpResult, ResultOrValue + +logger = logging.getLogger(__name__) + + +def assign_constant_grads( + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + value: Any, + descend: bool = True, +): + if not descend: + return [value] * len(input_args), dict.fromkeys(input_kwargs, value) + + # descend into nested objects + arg_grads = [ + tree.map_structure(lambda _: value, OpResult.unwrap_value(arg)) + for arg in input_args + ] + kwarg_grads = { + k: tree.map_structure(lambda _: value, OpResult.unwrap_value(v)) + for k, v in input_kwargs.items() + } + return arg_grads, kwarg_grads + + +def straight_through_estimator( + ctx: OpCtx, # noqa: ARG001 + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, + call_id: CallID, # noqa: ARG001 + descend: bool = True, +) -> GradInType: + return assign_constant_grads(input_args, input_kwargs, grad_output, descend=descend) + + +def stop_grad( + ctx: OpCtx, # noqa: ARG001 + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, # noqa: ARG001 + call_id: CallID, # noqa: ARG001 +) -> GradInType: + # don't descend - want gradients to stop at the OpResult level + return assign_constant_grads(input_args, input_kwargs, None, 
+        descend=False)
+
+
+def zero_estimator(
+    ctx: OpCtx,  # noqa: ARG001
+    input_args: list[ResultOrValue],
+    input_kwargs: dict[str, ResultOrValue],
+    grad_output: tree.Structure,  # noqa: ARG001
+    call_id: CallID,  # noqa: ARG001
+) -> GradInType:
+    """Sets the gradient of all inputs to zero.
+
+    Note that this is not the same as truncating the compute graph (stop_grad),
+    since upstream nodes can still optimize their logprobs. The zero estimator is
+    the unbiased choice if we have no information about the gradient.
+    """
+    return assign_constant_grads(input_args, input_kwargs, 0.0)
+
+
+def llm_straight_through_estimator(
+    ctx: OpCtx,  # noqa: ARG001
+    input_args: list[ResultOrValue],
+    input_kwargs: dict[str, ResultOrValue],
+    grad_output: tree.Structure,
+    call_id: CallID,  # noqa: ARG001
+) -> GradInType:
+    """Straight-through for an LLM: descend into the config, but not msgs/tools/tool_choice.
+
+    See LLMCallOp.backward() for more details on this choice.
+    Don't bother checking that input_args/input_kwargs have the right structure,
+    since compute_grads() will raise if not.
+    """
+    config_grad = tree.map_structure(
+        lambda _: grad_output, OpResult.unwrap_value(input_kwargs["config"])
+    )
+    grad_args = [grad_output] * len(input_args)
+    grad_kwargs = {"config": config_grad}
+    if "msgs" in input_kwargs:
+        grad_kwargs["msgs"] = grad_output
+    if "tools" in input_kwargs:
+        grad_kwargs["tools"] = grad_output
+
+    return grad_args, grad_kwargs
+
+
+def assign_default_grads(
+    input_grads: GradInType,
+    input_args: list[ResultOrValue],
+    input_kwargs: dict[str, ResultOrValue],
+    default_grad_val: float = 0.0,
+) -> GradInType:
+    """Sets default_grad_val for every element of input_args/input_kwargs missing from input_grads.
+
+    Example:
+    - input_kwargs = {"a": {"b": 1, "c": 2}},
+    - input_grad_kwargs = {"a": {"b": 0.1}}
+    Output: input_grads[1] = {"a": {"b": 0.1, "c": default_grad_val}}
+
+    Returns:
+        GradInType: A tuple containing the updated input_grad_args and
+        input_grad_kwargs with default values assigned where necessary.
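+
+    A concrete sketch of the example above (illustrative values only):
+    ```python
+    arg_grads, kwarg_grads = assign_default_grads(
+        ([], {"a": {"b": 0.1}}), [], {"a": {"b": 1, "c": 2}}
+    )
+    assert kwarg_grads == {"a": {"b": 0.1, "c": 0.0}}
+    ```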
+    """
+
+    def get_nested_value(data: tree.Structure, path: list) -> Any:
+        """Traverse given path over data and return the value at the end of the path."""
+        try:
+            current_value = data
+            for key in path:
+                current_value = current_value[key]
+        except (KeyError, IndexError):
+            return None  # If the path is not found, return None (then default_grad_val will be assigned)
+        else:
+            return current_value
+
+    def assign_default_gradients(
+        input_grads: tree.Structure, path: list, _value: Any
+    ) -> Any:
+        """Assign default_grad_val where grads are missing."""
+        existing_grad = get_nested_value(input_grads, path)
+        # Explicitly check for None: a present-but-zero gradient should be kept as-is
+        return default_grad_val if existing_grad is None else existing_grad
+
+    input_args_kwargs = (input_args, input_kwargs)
+    input_grads = tree.map_structure_with_path(
+        partial(assign_default_gradients, input_grads),
+        input_args_kwargs,
+    )
+
+    tree.assert_same_structure(input_grads, input_args_kwargs)
+    return input_grads
diff --git a/ldp/graph/memory.py b/ldp/graph/memory.py
new file mode 100644
index 00000000..9ab15fd1
--- /dev/null
+++ b/ldp/graph/memory.py
@@ -0,0 +1,146 @@
+import asyncio
+from abc import ABC, abstractmethod
+from collections.abc import AsyncIterator
+from contextlib import asynccontextmanager
+from typing import Any, ClassVar, Generic, TypeVar
+from uuid import UUID
+
+import numpy as np
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    Field,
+    PrivateAttr,
+    field_validator,
+    model_validator,
+)
+from usearch.index import Index
+
+from ldp.llms import EmbeddingModel
+
+
+class Memory(BaseModel):
+    """A single memory about an input, output, and value tuple.
+
+    A memory is a record of an input, output, and resulting value, typically used
+    for prompting a language model. It could also describe a whole forward pass,
+    where the input is the observation and the output is the action taken.
+    The query is optionally different from the input and is used for retrieving
+    the memory. For example, it could be much larger than the input because it
+    won't be formatted into the resulting prompt.
+    """
+
+    query: str = Field(
+        description="String to be embedded into a retrieval key for a memory index."
+    )
+    input: str | None = Field(
+        default=None,
+        description=(
+            "Some input (e.g. prompt to LLM, observation). If None (default), the input"
+            " is set to match the query."
+        ),
+    )
+    output: str = Field(description="Some output (e.g. tool selection).")
+    value: float = Field(description="Measure of the output's quality (e.g. loss).")
+    run_id: UUID | None = Field(
+        default=None,
+        description=(
+            "Associated run_id, for debugging purposes, to trace "
+            "which forward pass generated the memory."
+ ), + ) + template: str = "Input: {input}\nOutput: {output}\nValue: {value}" + + @model_validator(mode="before") + @classmethod + def ensure_query(cls, data: Any) -> Any: + """Copy input to match the query if input is None.""" + if isinstance(data, dict) and data.get("input") is None: + data["input"] = data["query"] + return data + + def __str__(self) -> str: + return self.template.format(**self.model_dump()) + + +TIndex = TypeVar("TIndex") + + +class MemoryModel(BaseModel, Generic[TIndex], ABC): + """A collection of memories with retrieval.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + embedding_model: EmbeddingModel = Field( + default_factory=lambda: EmbeddingModel.from_name( + "hybrid-text-embedding-3-small" + ) + ) + memories: dict[int, Memory] = Field(default_factory=dict) + _index: TIndex + _index_lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock) + + @field_validator("memories") + @classmethod + def enforce_empty(cls, v: dict) -> dict: + if v: + raise ValueError("Memories must be empty at construction time.") + return v + + async def add_memory(self, memory: Memory) -> None: + key = await self._add_to_index( + embedding=await self.embedding_model.embed_text(memory.query) + ) + self.memories[key] = memory + + DEFAULT_MEMORY_MATCHES: ClassVar[int] = 3 + + async def get_memory( + self, query: str, matches: int = DEFAULT_MEMORY_MATCHES + ) -> list[Memory]: + return await self._search_index( + embedding=await self.embedding_model.embed_text(query), matches=matches + ) + + def __len__(self) -> int: + return len(self.memories) + + @asynccontextmanager + async def safe_access_index(self) -> AsyncIterator[TIndex]: + """Get the internal Index under the protection of an internal Lock.""" + # pylint bug, SEE: https://github.com/pylint-dev/pylint/issues/9813 + async with self._index_lock: # pylint: disable=not-async-context-manager + yield self._index + + @abstractmethod + async def _add_to_index(self, embedding: np.ndarray) -> int: + """Add an embedding to the internal Index and return its key.""" + + @abstractmethod + async def _search_index( + self, embedding: np.ndarray, matches: int = DEFAULT_MEMORY_MATCHES + ) -> list[Memory]: + """Search the internal Index, returning a 'matches' amount of Memories.""" + + +class UIndexMemoryModel(MemoryModel[Index]): + """Memory model using a U-Search index.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + if not self.embedding_model.dimensions: + raise TypeError("Specify dimensions to the embedding model.") + self._index = Index(ndim=self.embedding_model.dimensions) + + async def _add_to_index(self, embedding: np.ndarray) -> int: + async with self.safe_access_index() as index: + return int(index.add(len(self.memories), embedding)) + + async def _search_index( + self, embedding: np.ndarray, matches: int = MemoryModel.DEFAULT_MEMORY_MATCHES + ) -> list[Memory]: + async with self.safe_access_index() as index: + search_matches = index.search(embedding, matches) + # mypy doesn't respect "old style" __getitem__/__len__ as iterable, + # so we have this ignore. SEE: https://github.com/python/mypy/issues/9737 + return [self.memories[m.key] for m in search_matches] # type: ignore[union-attr] diff --git a/ldp/graph/modules/__init__.py b/ldp/graph/modules/__init__.py new file mode 100644 index 00000000..990360e9 --- /dev/null +++ b/ldp/graph/modules/__init__.py @@ -0,0 +1,31 @@ +""" +A module is a subgraph of a compute graph that can be exposed like a single node/op. 
+ +An analogous entity in PyTorch is torch.nn.Module. +""" + +from .llm_call import ParsedLLMCallModule +from .react import ( + MalformedMessageError, + ReActModule, + ToolDescriptionMethods, + parse_message, +) +from .reflect import ReflectModule, ReflectModuleConfig +from .thought import ThoughtModule +from .value_function import DQNOp, DQNPolicyModule, EmbeddingDQNOp, EpsilonGreedyOp + +__all__ = [ + "DQNOp", + "DQNPolicyModule", + "EmbeddingDQNOp", + "EpsilonGreedyOp", + "MalformedMessageError", + "ParsedLLMCallModule", + "ReActModule", + "ReflectModule", + "ReflectModuleConfig", + "ThoughtModule", + "ToolDescriptionMethods", + "parse_message", +] diff --git a/ldp/graph/modules/llm_call.py b/ldp/graph/modules/llm_call.py new file mode 100644 index 00000000..43a786fc --- /dev/null +++ b/ldp/graph/modules/llm_call.py @@ -0,0 +1,30 @@ +from collections.abc import Callable, Iterable +from typing import Any, Generic, TypeVar + +from aviary.message import Message + +from ldp.graph.common_ops import ConfigOp, FxnOp, LLMCallOp +from ldp.graph.op_utils import compute_graph +from ldp.graph.ops import OpResult + +TParsedMessage = TypeVar("TParsedMessage", bound=Message) + + +class ParsedLLMCallModule(Generic[TParsedMessage]): + """Module for a processing-based tool selection, with a learnable configuration.""" + + def __init__( + self, llm_model: dict[str, Any], parser: Callable[..., TParsedMessage] + ): + self.config_op = ConfigOp[dict](config=llm_model) + self.llm_call_op = LLMCallOp() + self.parse_msg_op = FxnOp(parser) + + @compute_graph() + async def __call__( + self, messages: Iterable[Message], *parse_args, **parse_kwargs + ) -> tuple[OpResult[TParsedMessage], Message]: + raw_result = await self.llm_call_op(await self.config_op(), msgs=messages) + return await self.parse_msg_op( + raw_result, *parse_args, **parse_kwargs + ), raw_result.value diff --git a/ldp/graph/modules/react.py b/ldp/graph/modules/react.py new file mode 100644 index 00000000..57b04c3f --- /dev/null +++ b/ldp/graph/modules/react.py @@ -0,0 +1,272 @@ +import ast +import logging +import re +import textwrap +from collections.abc import Iterable +from enum import StrEnum +from typing import Any + +from aviary.message import Message +from aviary.tools import Tool, ToolCall, ToolRequestMessage + +from ldp.graph.common_ops import FxnOp, PromptOp +from ldp.graph.modules import ParsedLLMCallModule +from ldp.graph.op_utils import compute_graph +from ldp.graph.ops import OpResult +from ldp.llms import prepend_sys + +_DEFAULT_PROMPT_TEMPLATE = textwrap.dedent( + """ Answer the following questions as best you can. You have access to the following tools: + + {{tools}} + + Use the following format: + + {fields} + ... 
(this {fields_description} can repeat N times) + + Example: + + {example}""" +) +REACT_DEFAULT_PROMPT_TEMPLATE = _DEFAULT_PROMPT_TEMPLATE.format( + fields=( + "Thought: you should always think about what to do" + "\nAction: the action to take, should be one of [{tool_names}]" + "\nAction Input: comma separated list of inputs to action as python tuple" + "\nObservation: the result of the action" + ), + fields_description="Thought/Action/Action Input/Observation", + example=( + "Thought: I need to use the get_weather tool" + "\nAction: get_weather" + '\nAction Input: "New York", 7' + "\nObservation: The 7 day forecast for New York is [...]" + ), +) +ACT_DEFAULT_PROMPT_TEMPLATE = _DEFAULT_PROMPT_TEMPLATE.format( + fields=( + "Action: the action to take, should be one of [{tool_names}]" + "\nAction Input: comma separated list of inputs to action as python tuple" + "\nObservation: the result of the action" + ), + fields_description="Action/Action Input/Observation", + example=( + "Action: get_weather" + '\nAction Input: "New York", 7' + "\nObservation: The 7 day forecast for New York is [...]" + ), +) + + +class MalformedMessageError(ValueError): + """Error to throw if some aspect of a ToolRequestMessage is malformed.""" + + @classmethod + def react_parser_log_filter(cls, record: logging.LogRecord) -> bool: + """ + Filter out common parsing failures not worth looking into from logs. + + Returns: + False if the LogRecord should be filtered out, otherwise True to keep it. + """ + # NOTE: match both this Exception type's name and its content, to be robust + return not all(x in record.msg for x in (cls.__name__, EMPTY_CONTENT_BASE_MSG)) + + +# Define separately so we can filter out this message type +EMPTY_CONTENT_BASE_MSG = "No content in message" + + +def parse_message(m: Message, tools: list[Tool]) -> ToolRequestMessage: # noqa: C901 + """ + Parse an Act or ReAct Message into a ToolRequestMessage. + + Args: + m: Input raw message. + tools: Tools used to confirm a valid tool selection + + Returns: + Parsed ToolRequestMessage. + """ + if not m.content: + raise MalformedMessageError( + f"{EMPTY_CONTENT_BASE_MSG} of type {type(m).__name__}." + ) + + message_content = m.content + # strip (and overwrite) up to end of action input + loc = message_content.find("Action Input:") + if loc != -1: + loc = message_content.find("\n", loc) + message_content = message_content[: loc if loc > 0 else None] + # we need to override the message too - don't want the model to hallucinate + m.content = message_content + + action_args: tuple[Any, ...] = () + # https://regex101.com/r/qmqZ7Z/1 + action_input = re.search(r"Input:[ \t]*([ \S]*)", m.content) + # only parse if it takes arguments + if action_input and action_input.group(1).strip(): + input_str = action_input.group(1).strip() + # if it has commas and no quotes, it's almost certainly a tuple without + # parentheses, so we add them + if "," in input_str and not ( + input_str.startswith("(") and input_str.endswith(")") + ): + input_str = f"({input_str})" + try: + if input_str.startswith("(") and input_str.endswith(")"): + # Handle tuples and quoted strings inside + if '"' not in input_str and "'" not in input_str: + # Add quotes around each element within parentheses if they are not already quoted + # and if they are not purely numbers. 
There may exist a killer regex for this + # but I am a simple man + + # just catches things like "1.1".isnumeric() == False + # so we can't just use isnumeric + def is_number(s: str) -> bool: + try: + float(s) + except ValueError: + return False + return True + + input_str = ", ".join( + f'"{e.strip()}"' if not is_number(e) else str(e) + for e in input_str.strip("()").split(",") + if e.strip() + ) + input_str = f"({input_str})" + eval_result = ast.literal_eval(input_str) + action_args = ( + (eval_result,) + if not isinstance(eval_result, tuple) + else eval_result + ) + else: + # Convert to int or float if possible + try: + action_args = (ast.literal_eval(input_str),) + except (ValueError, SyntaxError): + action_args = (input_str,) + except Exception as exc: + raise MalformedMessageError( + f"Action Input {input_str} could not be parsed." + ) from exc + + if len(action_args) == 1 and isinstance(action_args[0], tuple): + action_args = action_args[0] + + action = re.search(r"Action:[ \t]*(\S*)", m.content) + if not action: + raise MalformedMessageError("Action not emitted.") + tool_name = action.group(1).strip() + # have to match up name to tool to line up args in order + try: + tool = next(t for t in tools if t.info.name == tool_name) + except StopIteration as exc: + raise MalformedMessageError(f"Tool {tool_name} not found in tools.") from exc + if len(action_args) < len(tool.info.parameters.required): + raise MalformedMessageError( + f"Action Input {action_args!r} shorter than {tool.info.name!r} tool's" + " parameters." + ) + + # Anecdotally we've observed thought also often captures the action + # NOTE: for Act agents there is no Thought, so the regex will return None + thought = re.search(r"Thought:[ \t]*(.*)", m.content) + return ToolRequestMessage( + content=thought.group(1) if thought else None, + tool_calls=[ToolCall.from_tool(tool, *action_args)], + ) + + +class ToolDescriptionMethods(StrEnum): + """Possible methods of describing the tools.""" + + STR = "describe_str" + XML = "describe_xml" + JSON = "describe_json" + + def get_prompt_prefix(self) -> str: + """Get the prefix to put in front of the prompt.""" + if self == self.STR: + return "" + if self == self.JSON: + return "Tools are specified with a JSON schema." + return "Tools are specified with an XML schema." + + +class ReActModule: + """An Act or ReAct module built to work with chat models. + + Paper: https://arxiv.org/abs/2210.03629 + + The ReAct style is like so, and note Act style has no 'Thought: ' entries: + System: + Answer the following questions as best you can. You have access to the following tools: + + {tools} + + Use the following format: + + Thought: you should always think about what to do + Action: the action to take, should be one of [{tool_names}] + Action Input: the input to the action + Observation: the result of the action + ... (this Thought/Action/Action Input/Observation can repeat N times) + User: + {questions} + Assistant: + Thought: + Action: + Action Input: + User: + Observation: + Assistant: + Thought: + Action: + Action Input: + ... 
+    """
+
+    @staticmethod
+    def parse_message(m: Message, tools: list[Tool]) -> ToolRequestMessage:
+        return parse_message(m, tools)
+
+    async def _create_system_prompt(self, tools: list[Tool]) -> OpResult[str]:
+        tool_info = "\n".join([
+            getattr(t.info, self._tool_description_method)() for t in tools
+        ])
+        if prefix := self._tool_description_method.get_prompt_prefix():
+            tool_info = f"{prefix}\n{tool_info}"
+        tool_names = ", ".join([t.info.name for t in tools])
+        return await self.prompt_op(
+            schema_type=self._tool_description_method.value,
+            tools=tool_info.strip(),
+            tool_names=tool_names,
+        )
+
+    def __init__(
+        self,
+        llm_model: dict[str, Any],
+        sys_prompt: str = REACT_DEFAULT_PROMPT_TEMPLATE,
+        tool_description_method: ToolDescriptionMethods = ToolDescriptionMethods.STR,
+    ):
+        self.prompt_op = PromptOp(sys_prompt)
+        self._tool_description_method = tool_description_method
+        # Stop generation before the model hallucinates an observation of its own
+        llm_model["stop"] = ["Observation:"]
+        self.package_msg_op = FxnOp(prepend_sys)
+        self.tool_select_module = ParsedLLMCallModule[ToolRequestMessage](
+            llm_model=llm_model, parser=self.parse_message
+        )
+
+    @compute_graph()
+    async def __call__(
+        self, messages: Iterable[Message], tools: list[Tool]
+    ) -> tuple[OpResult[ToolRequestMessage], Message]:
+        packaged_msgs = await self.package_msg_op(
+            messages, sys_content=await self._create_system_prompt(tools)
+        )
+        return await self.tool_select_module(packaged_msgs, tools=tools)  # type: ignore[arg-type]
diff --git a/ldp/graph/modules/reflect.py b/ldp/graph/modules/reflect.py
new file mode 100644
index 00000000..fc397db6
--- /dev/null
+++ b/ldp/graph/modules/reflect.py
@@ -0,0 +1,59 @@
+from typing import Any
+
+from aviary.message import Message
+from pydantic import BaseModel, Field
+
+from ldp.graph.common_ops import ConfigOp, FxnOp, LLMCallOp, PromptOp
+from ldp.graph.op_utils import compute_graph
+from ldp.graph.ops import ResultOrValue
+from ldp.llms import append_to_sys
+from ldp.llms.prompts import indent_xml
+
+
+class ReflectModuleConfig(BaseModel):
+    """Configuration for the ReflectModule."""
+
+    llm_model: dict[str, Any] = Field(
+        default={"model": "gpt-3.5-turbo"},
+        description="Starting configuration for the LLM model.",
+    )
+
+
+class ReflectModule:
+    """A module that simply uses an LLM to reflect on an input."""
+
+    def __init__(self, start_config: ReflectModuleConfig):
+        self.llm_call_op = LLMCallOp()
+        self.prompt_op = PromptOp(
+            "Consider a proposed response based on context. Reflect on the response"
+            " within <reflection> tags, then conclude with a possibly revised response"
+            " within <response> tags."
+        )
+        self.config_op = ConfigOp[ReflectModuleConfig](config=start_config)
+        self.llm_config_op = FxnOp[dict](lambda c: c.llm_model)
+        self.package_fxn = FxnOp(append_to_sys)
+
+        def extract_msg(msg: Message, backup_response: str) -> str:
+            # NOTE: the tag name is assumed to match the prompt above. Take the
+            # text between <response> tags, tolerating a missing closing tag.
+            msg_str = msg.content
+            if msg_str and "<response>" in msg_str and "</response>" in msg_str:
+                return msg_str.split("<response>")[1].split("</response>")[0]
+            if msg_str and "<response>" in msg_str:
+                return msg_str.split("<response>")[1].split("</response>")[0]
+            return backup_response
+
+        self.extract_msg = FxnOp(extract_msg)
+
+    @compute_graph()
+    async def __call__(
+        self, context: ResultOrValue[str], response: ResultOrValue[str]
+    ) -> ResultOrValue[str]:
+        llm_config = await self.llm_config_op(await self.config_op())
+        sys_str = await self.prompt_op()
+        # Package the context and proposed response as XML (tag names assumed)
+        user_str = indent_xml(
+            f"<context>{context}</context><response>{response}</response>"
+        )
+        msg = await self.package_fxn(user_str, sys_str)
+        llm_result = await self.llm_call_op(llm_config, msg)
+        return await self.extract_msg(llm_result, response)
diff --git a/ldp/graph/modules/thought.py b/ldp/graph/modules/thought.py
new file mode 100644
index 00000000..d723114c
--- /dev/null
+++ b/ldp/graph/modules/thought.py
@@ -0,0 +1,44 @@
+from collections.abc import Iterable, Mapping
+from typing import Any
+
+from aviary.message import Message
+from aviary.tools import ToolRequestMessage
+
+from ldp.graph.common_ops import FxnOp, PromptOp
+from ldp.graph.modules import ParsedLLMCallModule
+from ldp.graph.op_utils import compute_graph
+from ldp.graph.ops import OpResult
+from ldp.llms import prepend_sys_and_append_sys
+
+
+class ThoughtModule:
+    @staticmethod
+    def _downcast_to_message(message: Message | ToolRequestMessage) -> Message:
+        if isinstance(message, ToolRequestMessage):
+            # Downcast into a normal Message if the LLM tried to call tools
+            return Message(role=message.role, content=message.content)
+        return message
+
+    def __init__(
+        self, llm_model: dict[str, Any], first_sys_prompt: str, second_sys_prompt: str
+    ):
+        self.first_sys_prompt_op = PromptOp(first_sys_prompt)
+        self.second_sys_prompt_op = PromptOp(second_sys_prompt)
+        self.package_msg_op = FxnOp(prepend_sys_and_append_sys)
+        self.llm_call = ParsedLLMCallModule[Message](
+            llm_model, parser=self._downcast_to_message
+        )
+
+    @compute_graph()
+    async def __call__(
+        self,
+        messages: Iterable[Message],
+        first_prompt_kwargs: Mapping[str, Any],
+        second_prompt_kwargs: Mapping[str, Any],
+    ) -> OpResult[Message]:
+        packaged_msgs = await self.package_msg_op(
+            messages,
+            initial_sys_content=await self.first_sys_prompt_op(**first_prompt_kwargs),
+            final_sys_content=await self.second_sys_prompt_op(**second_prompt_kwargs),
+        )
+        return (await self.llm_call(packaged_msgs))[0]  # type: ignore[arg-type]
diff --git a/ldp/graph/modules/value_function.py b/ldp/graph/modules/value_function.py
new file mode 100644
index 00000000..f093be72
--- /dev/null
+++ b/ldp/graph/modules/value_function.py
@@ -0,0 +1,366 @@
+from __future__ import annotations
+
+import asyncio
+import inspect
+import itertools
+import json
+import random
+from abc import abstractmethod
+from collections.abc import Mapping, Sequence
+from contextlib import contextmanager
+from copy import deepcopy
+from typing import ClassVar, Generic, TypeVar
+
+import torch
+import tree
+from aviary.message import Message
+from aviary.tools import ToolRequestMessage
+from torch import nn
+
+from ldp.graph.async_torch import AsyncTorchModule
+from ldp.graph.common_ops import FxnOp
+from ldp.graph.gradient_estimators import assign_constant_grads
+from ldp.graph.op_utils import (
+    CallID,
+    compute_graph,
+    get_call_id,
+ get_training_mode, +) +from ldp.graph.ops import GradInType, Op, OpCtx, OpResult, ResultOrValue +from ldp.graph.torch_ops import store_tensor_inputs +from ldp.llms import ( + EmbeddingModel, + HybridEmbeddingModel, + LiteEmbeddingModel, + SparseEmbeddingModel, +) + + +def get_msg_content(msg: Message) -> str: + if isinstance(msg, ToolRequestMessage): + content_lines = [msg.content or ""] + for tc in msg.tool_calls: + tcf = tc.function + content_lines.append(tcf.name + "(" + json.dumps(tcf.arguments) + ")") + return "\n".join(content_lines) + + return msg.content or "" + + +class DQNOp(Op): + network: nn.Module + target_network: nn.Module + async_network: AsyncTorchModule + _network_fwd_args: list[inspect.Parameter] + + CTX_TENSOR_INPUT_KEY: ClassVar[str] = "tensor_input" + + def __init_subclass__(cls) -> None: + """Register self._network_fwd_args, setup async & target networks.""" + super().__init_subclass__() + + original_init = cls.__init__ + + def init_with_network_setup(self, *args, **kwargs): + original_init(self, *args, **kwargs) + + network_fwd_sig = inspect.signature(self.network.forward) + self._network_fwd_args = list(network_fwd_sig.parameters.values()) + + # Set up a helper to handle async forward calls + if not hasattr(self, "async_network"): + self.async_network = AsyncTorchModule( + self.network, batch_size=8, max_wait_interval=0.01 + ) + + # Create a target network if the implementation didn't do so + if not hasattr(self, "target_network"): + self.target_network = deepcopy(self.network) + + # Make sure the target network is not trainable + for param in self.target_network.parameters(): + param.requires_grad = False + + cls.__init__ = init_with_network_setup # type: ignore[method-assign] + + @contextmanager + def use_target_network(self): + self.async_network.module = self.target_network + yield + self.async_network.module = self.network + + @abstractmethod + async def forward(self, state_action: str) -> float: + pass + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + # DQN does not send gradients back, since we do not need to compute dlnP/dQ + return assign_constant_grads(input_args, input_kwargs, None) + + def _store_tensor_inputs( + self, + tensor_args: Sequence[torch.Tensor], + tensor_kwargs: Mapping[str, torch.Tensor], + ): + # In order to optimize the neural network, we must store its inputs for the + # optimizer to later use. All subclassed DQNOp.forwards must call this method. + store_tensor_inputs( + self.ctx, + key=self.CTX_TENSOR_INPUT_KEY, + tensor_args=tensor_args, + tensor_kwargs=tensor_kwargs, + fwd_args=self._network_fwd_args, + detach=True, # we don't need to backpropagate through the inputs + ) + + +# Should consider supporting non-Messages, but merge_messages relies on +# this constraint for now. +TAction = TypeVar("TAction", bound=Message) + + +def merge_messages(state: Sequence[Message], action: Message) -> str: + all_msgs = [*state, action] + return "\n".join(get_msg_content(msg) for msg in all_msgs) + + +class DQNPolicyModule(Generic[TAction]): + """Module that implements a DQN and epsilon-greedy policy. + + Given a state and list of action candidates, this module will score each action + and then select an action according to an epsilon-greedy policy. 
If the set of + candidate actions is not fixed, but sampled probabilistically, this is + equivalent to StochDQN (algo 4 of https://arxiv.org/pdf/2405.10310). + """ + + def __init__( + self, dqn: DQNOp | None = None, epsilon: float = 0.0, actions_only: bool = False + ): + self.actions_only = actions_only + self.merge_messages = FxnOp[str](merge_messages) + + self.dqn = dqn if dqn is not None else EmbeddingDQNOp() + + self.merge_q_action = FxnOp[tuple[float, TAction]]( + lambda q, a: (q, a), fxn_name="merge_q_action" + ) + + self.action_selector = EpsilonGreedyOp(epsilon=epsilon) + self.action_extractor = FxnOp[TAction](lambda x: x[1]) + + @compute_graph() + async def __call__( + self, + state: ResultOrValue[Sequence[Message]], + *actions: ResultOrValue[TAction], + ) -> tuple[float, OpResult[TAction]]: + """Forward pass of the policy. + + Args: + state: Agent state s_t + actions: Proposed actions + + Returns: + (q, a_t): The Q value of the selected action a_t and the + action itself. We return both in case an agent wants + to return Q as the value estimate. + """ + state_actions = await asyncio.gather(*[ + self.merge_messages([] if self.actions_only else state, action) + for action in actions + ]) + qs = await asyncio.gather(*[ + self.dqn(state_action) for state_action in state_actions + ]) + all_qs_and_actions = await asyncio.gather( + *list(itertools.starmap(self.merge_q_action, zip(qs, actions, strict=True))) + ) + + q_and_action = await self.action_selector( + *all_qs_and_actions, state_actions=[s_a.value for s_a in state_actions] + ) + action = await self.action_extractor(q_and_action) + q = q_and_action.value[0] # NOTE: q is not part of the compute graph! + + return q, action + + +def make_regressor( + input_dim: int, + hidden_dim: int | list[int], + num_layers: int, + dropout: float, + layer_norm: bool, + dtype: torch.dtype | str, +) -> nn.Module: + if num_layers < 1: + raise ValueError("Must have at least one layer.") + + if num_layers == 1: + # useful for debugging + return nn.Linear(input_dim, 1) + + if isinstance(hidden_dim, int): + hidden_dim = [hidden_dim] * (num_layers - 1) + if not len(hidden_dim) == num_layers - 1: + raise ValueError( + f"Expected {num_layers - 1} hidden dimensions, got {len(hidden_dim)}." 
+        )
+
+    layers: list[nn.Module] = []
+    layers.extend(
+        [nn.Linear(input_dim, hidden_dim[0])]
+        + ([nn.LayerNorm(hidden_dim[0])] if layer_norm else [])
+        + [nn.SiLU(), nn.Dropout(dropout)]
+    )
+    for i in range(1, num_layers - 1):
+        layers.extend(
+            [nn.Linear(hidden_dim[i - 1], hidden_dim[i])]
+            + ([nn.LayerNorm(hidden_dim[i])] if layer_norm else [])
+            + [nn.SiLU(), nn.Dropout(dropout)]
+        )
+    layers.append(nn.Linear(hidden_dim[-1], 1))
+    # initialize the last layer at 0
+    layers[-1].weight.data.fill_(0)
+    layers[-1].bias.data.fill_(0)
+
+    model = nn.Sequential(*layers)
+
+    if isinstance(dtype, str):
+        dtype = getattr(torch, dtype)
+    return model.to(dtype)
+
+
+class EmbeddingDQNOp(DQNOp):
+    def __init__(
+        self,
+        *,
+        dense_embedding: str = "text-embedding-3-small",
+        dense_embedding_dim: int = 512,
+        sparse_embedding_dim: int = 0,
+        hidden_dim: int | list[int] = 64,
+        num_layers: int = 3,
+        dropout: float = 0.0,
+        layer_norm: bool = True,
+        device: str | torch.device = "cpu",
+        dtype: str | torch.dtype = torch.float32,
+        fwd_async_batch_size: int = 32,
+        fwd_async_max_wait: float = 0.03,
+    ):
+        emb_models: list[EmbeddingModel] = []
+        if dense_embedding_dim > 0:
+            emb_models.append(
+                LiteEmbeddingModel(
+                    name=dense_embedding,
+                    dimensions=dense_embedding_dim,
+                    embed_kwargs={
+                        "caching": True,
+                        # LiteLLM docs (https://docs.litellm.ai/docs/proxy/reliability) are not
+                        # clear on which to use, and the code appears to deprecate num_retries
+                        # for the completions API (but not embeddings). Setting both to be safe
+                        # and future-proof. No harm in being redundant.
+                        "max_retries": 5,
+                        "num_retries": 5,
+                    },
+                )
+            )
+        if sparse_embedding_dim > 0:
+            emb_models.append(SparseEmbeddingModel(dimensions=sparse_embedding_dim))
+        self.embedding = HybridEmbeddingModel(models=emb_models)
+
+        if isinstance(device, str):
+            device = torch.device(device)
+        self.device = device
+
+        embedding_dim = dense_embedding_dim + sparse_embedding_dim
+        self.network = make_regressor(
+            input_dim=embedding_dim,
+            hidden_dim=hidden_dim,
+            num_layers=num_layers,
+            dropout=dropout,
+            layer_norm=layer_norm,
+            dtype=dtype,
+        ).to(device)
+
+        self.async_network = AsyncTorchModule(
+            self.network,
+            batch_size=fwd_async_batch_size,
+            max_wait_interval=fwd_async_max_wait,
+        )
+
+    async def forward(self, state_action: str) -> float:
+        x = torch.tensor(await self.embedding.embed_text(state_action))
+        if get_training_mode():
+            self._store_tensor_inputs(tensor_args=(x,), tensor_kwargs={})
+
+        self.async_network.module.eval()
+        return (await self.async_network(input=x.to(self.device))).item()
+
+
+class EpsilonGreedyOp(Op[tuple[float, Message]]):
+    """Epsilon-greedy action selection from a list of scored actions.
+
+    In training mode, will pick a random action with probability epsilon. Otherwise,
+    greedily picks the highest-scoring action. Gradients only flow back to the selected
+    action and corresponding Q value.
+    """
+
+    def __init__(self, epsilon: float = 0.0):
+        if not 0 <= epsilon <= 1:
+            raise ValueError(
+                "Epsilon-greedy sampling requires epsilon to be in [0, 1]."
+ ) + self.epsilon = epsilon + + async def forward( + self, + *scored_actions: tuple[float, Message], + state_actions: list[str], + ) -> tuple[float, Message]: + if get_training_mode() and random.random() < self.epsilon: + # random + i_best = random.randint(0, len(scored_actions) - 1) + best = scored_actions[i_best] + + else: + # greedy + i_best, best = max(list(enumerate(scored_actions)), key=lambda x: x[1][0]) + + if get_training_mode(): + # Record which we selected. all other branches will be pruned + # during backward pass + self.ctx.update(get_call_id(), "i_selected", i_best) + # Record the (s_t, a_t) candidate pairs for the optimizer to compute + # max_a' Q(s, a') during the update. + self.ctx.update(get_call_id(), "state_actions", state_actions) + + return best + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + n_scored_actions = len(input_args) + i_selected = ctx.get(call_id, "i_selected") + return ( + [grad_output if i == i_selected else None for i in range(n_scored_actions)], + {"state_actions": None}, + ) diff --git a/ldp/graph/op_utils.py b/ldp/graph/op_utils.py new file mode 100644 index 00000000..21e00c0e --- /dev/null +++ b/ldp/graph/op_utils.py @@ -0,0 +1,148 @@ +import contextvars +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from typing import NamedTuple +from uuid import UUID, uuid4 + +from aviary.utils import is_coroutine_callable + + +class CallID(NamedTuple): + run_id: UUID + fwd_id: UUID + + def __repr__(self) -> str: + return f"{self.run_id}:{self.fwd_id}" + + +_RUN_ID = contextvars.ContextVar[UUID]("run_id") +_CALL_ID = contextvars.ContextVar[CallID]("call_id") + + +@asynccontextmanager +async def compute_graph() -> AsyncIterator[UUID]: + """Initialize a compute graph by setting a run ID. + + If a run ID is already set (i.e. we are already inside a + get_run_id() context), then the existing run ID is returned. + Otherwise, a new UUID is created. + """ + try: + # If a run ID is set, return it. + run_id = _RUN_ID.get() + token: contextvars.Token | None = None + except LookupError: + # If not, make a new run ID. + run_id = uuid4() + token = _RUN_ID.set(run_id) + + try: + yield run_id + finally: + if token is not None: + # token is not None if we made a new run ID. In that case, + # reset the context to its previous state. + _RUN_ID.reset(token) + + +def get_run_id() -> UUID: + """Get the current run ID.""" + try: + return _RUN_ID.get() + except LookupError: + raise RuntimeError( + "Attempting to access run ID, but not inside compute graph context." + ) from None + + +@asynccontextmanager +async def op_call() -> AsyncIterator[CallID]: + """Decorate an op call with a call ID. + + If a call ID is already set (i.e. we are already inside an op call), + then the existing call ID is returned. + Otherwise, a new UUID is created. + """ + # Get run_id in case we need to construct a CallID, but this also serves + # as a check that we're inside compute_graph() + run_id = get_run_id() + + try: + call_id = _CALL_ID.get() + token: contextvars.Token | None = None + except LookupError: + fwd_id = uuid4() + call_id = CallID(run_id, fwd_id) + token = _CALL_ID.set(call_id) + + try: + yield call_id + finally: + if token is not None: + # token is not None if we made a new call ID. In that case, + # reset the context to its previous state. 
+ _CALL_ID.reset(token) + + +def get_call_id() -> CallID: + """Get the current call ID.""" + try: + return _CALL_ID.get() + except LookupError: + raise RuntimeError( + "Attempting to access call ID, but not inside op call context." + ) from None + + +_TRAINING_MODE = contextvars.ContextVar[bool]("training_mode", default=True) + + +def get_training_mode() -> bool: + """Get the current training mode.""" + return _TRAINING_MODE.get() + + +def set_training_mode(training_mode: bool) -> None: + """Set the training mode.""" + _TRAINING_MODE.set(training_mode) + + +class _TrainingModeContext: + def __init__(self, training_mode: bool): + self.training_mode = training_mode + self.prev_training_mode = get_training_mode() + + def __call__(self, fn=None): + if fn is None: + return self + + if is_coroutine_callable(fn): + + async def wrapper(*args, **kwargs): + async with self: + return await fn(*args, **kwargs) + + else: + + def wrapper(*args, **kwargs): + with self: + return fn(*args, **kwargs) + + return wrapper + + def __enter__(self): + self.prev_training_mode = get_training_mode() + set_training_mode(self.training_mode) + + def __exit__(self, exc_type, exc_value, traceback): + set_training_mode(self.prev_training_mode) + + async def __aenter__(self): + self.__enter__() + + async def __aexit__(self, exc_type, exc_value, traceback): + self.__exit__(exc_type, exc_value, traceback) + + +train_mode = _TrainingModeContext(training_mode=True) +eval_mode = _TrainingModeContext(training_mode=False) diff --git a/ldp/graph/ops.py b/ldp/graph/ops.py new file mode 100644 index 00000000..7cd5617f --- /dev/null +++ b/ldp/graph/ops.py @@ -0,0 +1,513 @@ +"""This module defines the Op class and its helper classes.""" + +from __future__ import annotations + +import inspect +import itertools +import logging +from abc import ABC, abstractmethod +from collections import defaultdict +from collections.abc import Callable, Collection, Iterator, Mapping, Sequence +from typing import Any, ClassVar, Generic, TypeAlias, TypeVar +from uuid import UUID + +import networkx as nx +import tree +from pydantic import BaseModel, ConfigDict, Field + +from ldp.graph.op_utils import ( + CallID, + compute_graph, + get_call_id, + get_training_mode, + op_call, +) +from ldp.graph.pydantic_patch import PatchGenericPickle + +logger = logging.getLogger(__name__) + + +GradOutType: TypeAlias = tree.Structure | None # None means the gradient has terminated +GradInType: TypeAlias = tuple[Sequence[GradOutType], Mapping[str, GradOutType]] +BackwardsType: TypeAlias = Callable[ + # Call signature of Op.backward or GradientEstimator.backward + ["OpCtx", list, dict, tree.Structure, "CallID"], GradInType +] +TOutput = TypeVar("TOutput") + + +class OpResult(BaseModel, Generic[TOutput], PatchGenericPickle): + """Result of a forward pass, used in the compute graph.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + call_id: CallID + op_name: str = Field( + description="Name of the Op instance (i.e. op.name) that produced this OpResult." + ) + op_class_name: str = Field( + description="Fully qualified name of the class " + "of the Op that produced this OpResult." + ) + + value: TOutput + + def __hash__(self) -> int: + return hash(self.call_id) + + def compute_grads( + self, + grad_output: tree.Structure | None = None, + backward_fns: Mapping[str | type, BackwardsType] | None = None, + ) -> None: + """ + Compute the gradient of the backward graph in-place. + + This executes topological traversal. 
+        It is up to the Op to:
+        (a) define the backward computation
+        (b) store internal gradients for optimizer updates.
+        """
+        # call ID -> [d op(x) / d x] for each op that consumes x
+        grad_outputs: dict[CallID, list[tree.Structure]] = defaultdict(list)
+
+        # grad_outputs stores a list of output grads (corresponding to each consuming op call).
+        # Since the root node is not consumed by any other node, we create a singleton list here.
+        # If None was passed, set it to 0 so that we don't prune the compute graph here.
+        grad_outputs[self.call_id] = [grad_output] if grad_output is not None else [0.0]
+
+        # We will traverse the graph in reverse topological order
+        for node in self.traverse():
+            # get output gradients
+            grad_output = grad_outputs[node.call_id]
+            if not grad_output:
+                # compute graph terminated
+                continue
+            # Make sure structure of grads match before summing
+            try:
+                [tree.assert_same_structure(grad_output[0], g) for g in grad_output[1:]]
+            except ValueError as e:
+                raise ValueError(
+                    f"Mismatched gradient structures in compute graph at Op: {self.op_name}."
+                ) from e
+            aggregated_grad_output = tree.map_structure(lambda *x: sum(x), *grad_output)  # noqa: FURB111
+
+            input_args, input_kwargs = node.inputs
+            arg_grads, kwarg_grads = node._run_backward(
+                input_args,
+                input_kwargs,
+                aggregated_grad_output,
+                node._resolve_backward_impl(backward_fns),
+            )
+
+            for a, g in zip(input_args, arg_grads, strict=True):
+                # Must have exact match between input_args and arg_grads
+                # Only propagate gradients to input OpResults if grad is not None
+                if g is not None and isinstance(a, OpResult):
+                    grad_outputs[a.call_id].append(g)
+
+            if kwarg_grads.keys() != input_kwargs.keys():
+                raise ValueError(
+                    f"Mismatch between grads returned in Op.backward and its input kwargs. "
+                    f"Expected {input_kwargs.keys()}, got {kwarg_grads.keys()}."
+                )
+            for k, a in input_kwargs.items():
+                if (g := kwarg_grads[k]) is not None and isinstance(a, OpResult):
+                    grad_outputs[a.call_id].append(g)
+
+    def _resolve_backward_impl(
+        self, backward_fns: Mapping[str | type, BackwardsType] | None
+    ) -> BackwardsType:
+        backward_fns = backward_fns or {}
+        for key in (self.ctx.op_name, self.op_class_name, self.op_class):
+            if key in backward_fns:
+                return backward_fns[key]
+        return self.op_class.backward
+
+    def _run_backward(
+        self,
+        input_args: list[ResultOrValue],
+        input_kwargs: dict[str, ResultOrValue],
+        grad_output: tree.Structure,
+        backward_fn: BackwardsType,
+    ) -> GradInType:
+        self._update_ctx("grad_output", grad_output)
+        unwrapped_input_args = [OpResult.unwrap_value(a) for a in input_args]
+        unwrapped_input_kwargs = {
+            k: OpResult.unwrap_value(v) for k, v in input_kwargs.items()
+        }
+        input_grads = backward_fn(
+            self.ctx,
+            unwrapped_input_args,
+            unwrapped_input_kwargs,
+            grad_output,
+            self.call_id,
+        )
+        self._update_ctx("grad_input", input_grads)
+        return input_grads
+
+    @property
+    def op_class(self) -> type[Op]:
+        return _OP_CLASS_REGISTRY[self.op_class_name]
+
+    @property
+    def ctx(self) -> OpCtx:
+        # This is a property to avoid serialization of the context. There are two reasons:
+        # 1. Contexts have their own persist() mechanism for serialization
+        # 2. We'd prefer contexts to be created via get_or_create(). Allowing for arbitrary
+        #    deserialization makes it hard to enforce that.
+        return OpCtx.get_or_create(self.op_name)
+
+    def get_compute_graph(self, backward: bool = True) -> nx.DiGraph:
+        """Construct a directed graph of the compute graph that led to this OpResult.
+
+        Args:
+            backward: If True (default), constructs the backwards graph in which outputs
+                point to inputs. If False, constructs the forward call graph.
+                For most cases (e.g. backprop), backward=True is desirable.
+
+        Returns:
+            A digraph in which nodes are OpResults.
+        """
+
+        def add_edges(graph: nx.DiGraph, node: OpResult) -> None:
+            """Recursively add edges to the input graph."""
+            input_args, input_kwargs = node.inputs
+            for x in itertools.chain(input_args, input_kwargs.values()):
+                if isinstance(x, OpResult):
+                    edge = (node, x) if backward else (x, node)
+                    graph.add_edge(*edge)
+                    add_edges(graph, x)
+
+        graph = nx.DiGraph()
+        graph.add_node(self)
+        add_edges(graph, self)
+
+        return graph
+
+    def traverse(
+        self,
+        topological_order: bool = True,
+        filter_fn: Callable[[OpResult], bool] = lambda _: True,
+    ) -> Iterator[OpResult]:
+        """Traverse the compute graph that led to this OpResult.
+
+        Args:
+            topological_order: If True, traverse the backwards graph in topological
+                order. This requires having the whole graph in memory. If False,
+                traverse the backwards graph in depth-first order. This can be done
+                lazily and is useful if we are trying to hydrate the graph node-by-node.
+                Most user-facing cases can leave this as True. Defaults to True.
+            filter_fn: Will only yield nodes that pass this filter function. Note that
+                nodes that fail will still be traversed.
+
+        Yields:
+            An iterator over the nodes of this graph.
+        """
+        if topological_order:
+            G = self.get_compute_graph()
+            for node in nx.topological_sort(G):
+                if filter_fn(node):
+                    yield node
+
+        else:
+            # If not topological order, do a recursive depth-first traversal.
+            # Note that, when traversing a node, its children do not need to be available
+            # yet. This allows us to lazily load nodes when hydrating from a ctx backend.
+            if filter_fn(self):
+                yield self
+            input_args, input_kwargs = self.inputs
+            for a in itertools.chain(input_args, input_kwargs.values()):
+                if isinstance(a, OpResult):
+                    # Recursively apply depth-first traversal on each node
+                    yield from a.traverse(topological_order=False)
+
+    @property
+    def inputs(self) -> tuple[list[ResultOrValue], dict[str, ResultOrValue]]:
+        return self._get_from_ctx("input")
+
+    @property
+    def logprob(self) -> float | None:
+        return self._get_from_ctx("logprob", default=None)
+
+    @property
+    def grad(self) -> tree.Structure | None:
+        """Returns `d ln(P_{compute_graph}) / d self` or None if gradients have not been computed."""
+        return self._get_from_ctx("grad_output", default=None)
+
+    @property
+    def run_id(self) -> UUID:
+        return self.call_id.run_id
+
+    @staticmethod
+    def unwrap_value(result: ResultOrValue) -> Any:
+        if isinstance(result, OpResult):
+            return result.value
+        return result
+
+    def __repr__(self) -> str:
+        return (
+            f"OpResult(op={self.op_class_name}:{self.op_name}, "
+            f"call_id={self.call_id}, value={self.value!r})"
+        )
+
+    def __str__(self) -> str:
+        return str(self.value)
+
+    def _get_from_ctx(self, key: str, **kwargs):
+        if self.call_id is None:
+            raise ValueError(
+                "Attempting to access context but compute graph "
+                "is not available for this OpResult."
+ ) + + return self.ctx.get(call_id=self.call_id, key=key, **kwargs) + + def _update_ctx(self, key: str, value: Any): + if self.call_id is None: + raise RuntimeError( + "Attempting to update context but compute graph " + "is not available for this OpResult." + ) + + self.ctx.update(call_id=self.call_id, key=key, value=value) + + +ResultOrValue: TypeAlias = OpResult[TOutput] | TOutput + +# Sentinel value for get() default +NOT_FOUND = object() + + +class OpCtx(BaseModel): + # A global registry of contexts. We'd prefer to use an existing context + # for an Op if it already has been created. Also useful for persist_all() + _CTX_REGISTRY: ClassVar[dict[str, OpCtx]] = {} + + op_name: str + + data: dict = Field( + default_factory=lambda: defaultdict(dict), + exclude=True, + description="Maps run_id -> (fwd_id, key) -> value. " + "data is excluded from model_dump() etc because we do " + "not use Pydantic to persist context information. That " + "should be done via the DB backend instead. OpCtx will " + "serialize op_name, which is enough to rehydrate " + "from the DB.", + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._CTX_REGISTRY[self.op_name] = self + + @classmethod + def get_or_create(cls, op_name: str) -> OpCtx: + """Return an OpCtx corresponding to the Op with the given name.""" + try: + return cls._CTX_REGISTRY[op_name] # Get + except KeyError: + return cls(op_name=op_name) # Create + + def get(self, call_id: CallID, key: str, default: Any = NOT_FOUND) -> Any: + """Get an attribute with an optional default, emulating dict.get.""" + value = self.data[call_id.run_id].get((call_id.fwd_id, key), default) + if value is NOT_FOUND: + raise KeyError(f"call_id={call_id}, key='{key}' not found in context") + return value + + def update(self, call_id: CallID, key: str, value: Any): + self.data[call_id.run_id][(call_id.fwd_id, key)] = value + + @property + def call_ids(self) -> set[CallID]: + return { + CallID(run_id, fwd_id) + for run_id, calls in self.data.items() + for fwd_id, _ in calls + } + + def get_input_grads(self, call_id: CallID) -> GradInType: + # TODO: this function name is confusing. Let's deprecate it. We only use it + # in tests as far as I can tell. + try: + return self.get(call_id, "grad_input") + except KeyError as exc: + raise ValueError( + f"No gradients have been computed for call_id={call_id}." + ) from exc + + +def resolve_fully_qualified_name(cls: type) -> str: + return f"{cls.__module__}.{cls.__name__}" + + +# A global registry of Op classes, so we can look up backward() implementations +# without needing an instantiated Op. +_OP_CLASS_REGISTRY: dict[str, type[Op]] = {} + + +class Op(ABC, Generic[TOutput]): + """ + An operation that is 'differentiable' and can be used in an optimizer. + + Think torch.autograd.Function that can also be applied to non-differentiable + operations like prompt template formatting or Python function calls. + + These form a forward computation graph when composed with other Ops via + __call__. In training mode, this graph is constructed dynamically. + """ + + # Name is not guaranteed to be unique. Reasons: + # 1. We definitely don't want it to be unique when recreating a compute graph + # for training on previously-collected data. In that case, we want the Op's + # name to be the same as it was before, to match up contexts/OpResults + # 2. 
Uniqueness could make some DB lookups faster, but I don't think we run the
+    #    risk of OpCtx clobbers as long as call_id (which is guaranteed to be unique)
+    #    is always used as part of the key.
+    name: str
+    ctx: OpCtx
+    _fwd_args: list[inspect.Parameter]
+
+    def __init_subclass__(cls, **kwargs):
+        super().__init_subclass__(**kwargs)
+        _OP_CLASS_REGISTRY[resolve_fully_qualified_name(cls)] = cls
+
+    def __new__(cls, *args, **kwargs):
+        instance = super().__new__(cls)
+
+        # Needs to be overridden by the caller if this Op is to have
+        # a unique name in the compute graph. cf. Agent.__init_subclass__
+        # for an example of how to do this.
+        instance.set_name(cls.__name__)
+
+        # Set an attribute to help us map positional forward arguments to parameter
+        # names, for the backward pass. We do this on the instance and not cls b/c
+        # some instances may override it (e.g. FxnOp).
+        fwd_sig = inspect.signature(instance.forward)
+        instance._fwd_args = list(fwd_sig.parameters.values())
+
+        return instance
+
+    def set_name(self, name: str) -> None:
+        self.name = name
+        self.ctx = OpCtx.get_or_create(name)
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__} (name={self.name}, id={id(self)})"
+
+    @abstractmethod
+    async def forward(self, *args, **kwargs) -> TOutput:
+        """
+        Forward pass of the Op. The call ID is accessible via get_call_id() during the pass.
+
+        Returns:
+            Depending on this Op's purpose, the return may be considered an action
+            (e.g. a tool call) or it may not (e.g. a loss calculation).
+        """
+
+    @classmethod
+    @abstractmethod
+    def backward(
+        cls,
+        ctx: OpCtx,
+        input_args: list[ResultOrValue],
+        input_kwargs: dict[str, ResultOrValue],
+        grad_output: tree.Structure,
+        call_id: CallID,
+    ) -> GradInType:
+        """
+        Backward pass of the Op.
+
+        Args:
+            ctx: Context that was used during the forward pass.
+            input_args: Variable-length input arguments passed to forward, i.e.
+                via *args.
+            input_kwargs: All other arguments passed to the forward pass.
+            grad_output: The gradient backpropagated to the output of the forward
+                pass. Gradients from multiple consumers of the output are summed
+                by compute_grads() before being passed here.
+            call_id: Call ID of the forward pass.
+
+        Returns:
+            grad_input: `d log(p) / d input` for each input to the forward pass.
+                It should include gradients for all input positional and keyword
+                arguments. Set to None for gradients that should terminate.
+        """
+
+    def get_call_ids(self, run_ids: Collection[UUID] | None = None) -> set[CallID]:
+        call_ids = self.ctx.call_ids
+        return (
+            call_ids
+            if run_ids is None
+            else {c for c in call_ids if c.run_id in run_ids}
+        )
+
+    # This compute_graph() decoration will do nothing if we are already inside a compute graph.
+    # We add it here in case we are calling a bare op(), in which case we want a graph
+    # with a single node.
+    @compute_graph()
+    @op_call()
+    async def __call__(self, *args, **kwargs) -> OpResult[TOutput]:
+        call_id = get_call_id()
+
+        if not all(
+            arg.call_id.run_id == call_id.run_id
+            for arg in itertools.chain(args, kwargs.values())
+            if isinstance(arg, OpResult)
+        ):
+            raise RuntimeError(
+                "All args and kwargs must have the same run_id as the call_id's run_id. "
+                "Consider using the @compute_graph() decorator to ensure this."
+            )
+
+        # We're over-saving here - can explore later if memory usage is high.
+        # Unpack the args and kwargs from the result holders
+        unpacked_args = [(a.value if isinstance(a, OpResult) else a) for a in args]
+        unpacked_kwargs = {
+            k: v.value if isinstance(v, OpResult) else v for k, v in kwargs.items()
+        }
+
+        if get_training_mode():
+            # If training, save the inputs for the backward pass.
+            # Map positional arguments to keyword arguments to make the backward pass easier
+            for i_arg, (arg, param) in enumerate(
+                # strict=False b/c not all params in _fwd_args will be in args (i.e. defaults and **kwargs)
+                zip(args, self._fwd_args, strict=False)
+            ):
+                # Don't need to check for too many args or collisions with kwargs, since forward()
+                # will raise an exception anyway
+                if param.kind == inspect.Parameter.VAR_POSITIONAL:
+                    # *args, so scoop up the rest of the arg tuple.
+                    var_args = list(args[i_arg:])
+                    break
+
+                # Normal positional arg
+                kwargs[param.name] = arg
+            else:
+                var_args = []  # there were no *args if we got here
+
+            self.ctx.update(call_id, "input", (var_args, kwargs))
+
+        # Actually call the forward pass with unpacked args and kwargs
+        result = await self.forward(*unpacked_args, **unpacked_kwargs)
+        t_output: type[TOutput] = type(result)
+
+        # Now package up the result so it can be consumed by other calls.
+        # Explicitly specify t_output, since OpResult[TOutput] would return a generic object.
+        op_result = OpResult[t_output](  # type: ignore[valid-type]
+            value=result,
+            call_id=call_id,
+            op_name=self.name,
+            op_class_name=resolve_fully_qualified_name(type(self)),
+        )
+
+        if get_training_mode():
+            self.ctx.update(call_id, "output", op_result)
+
+        return op_result
+
+    def get_input_grads(self, call_id: CallID) -> GradInType:
+        return self.ctx.get_input_grads(call_id)
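To make the Op contract concrete, here is a minimal hypothetical Op subclass (a sketch under the API above, not an Op shipped by this patch): forward() computes the value, and backward() returns `d log(p) / d input` for every positional and keyword argument.

```python
class DoubleOp(Op[float]):
    """Hypothetical Op that doubles its input."""

    async def forward(self, x: float) -> float:
        return 2 * x

    @classmethod
    def backward(cls, ctx, input_args, input_kwargs, grad_output, call_id):
        # __call__ mapped the positional arg to the "x" kwarg, and no *args were
        # used, so arg_grads is empty. Chain rule: d(2x)/dx = 2.
        return [], {"x": 2 * grad_output}


# Assuming training mode, so inputs/outputs are recorded in the OpCtx:
#     op = DoubleOp()
#     result = await op(3.0)     # OpResult[float] with value 6.0
#     result.compute_grads(1.0)  # seed the backward pass with d/d(output) = 1.0
#     op.get_input_grads(result.call_id)  # -> ([], {"x": 2.0})
```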
diff --git a/ldp/graph/pydantic_patch.py b/ldp/graph/pydantic_patch.py
new file mode 100644
index 00000000..29cedf86
--- /dev/null
+++ b/ldp/graph/pydantic_patch.py
@@ -0,0 +1,50 @@
+import sys
+from typing import Generic, override
+
+from pydantic import BaseModel
+
+
+# Copied from https://github.com/pydantic/pydantic/issues/9390#issuecomment-2143939391
+class PatchGenericPickle:
+    """A mixin that allows generic pydantic models to be serialized and deserialized with pickle.
+
+    Notes
+    -----
+    In general, pickle shouldn't be encouraged as a means of serialization since there are better,
+    safer options. In some cases, e.g. Streamlit's `@st.cache_data`, there's no getting around
+    needing to use pickle.
+
+    As of Pydantic 2.7, generics don't properly work with pickle. The core issue is the following:
+    1. For each specialized generic, pydantic creates a new subclass at runtime. This class
+       has a `__qualname__` that contains the type var argument e.g. `"MyGeneric[str]"` for a
+       `class MyGeneric(BaseModel, Generic[T])`.
+    2. Pickle attempts to find a symbol with the value of `__qualname__` in the module where the
+       class was defined, which fails since Pydantic defines that class dynamically at runtime.
+       Pydantic does attempt to register these dynamic classes, but currently only for classes
+       defined at the top level of the interpreter.
+
+    See Also
+    --------
+    - https://github.com/pydantic/pydantic/issues/9390
+    """  # noqa: D416
+
+    @classmethod
+    @override
+    def __init_subclass__(cls, **kwargs):
+        # Note: we're still in __init_subclass__, not yet in __pydantic_init_subclass__,
+        # so not all model_fields are available at this point.
+ super().__init_subclass__(**kwargs) + + if not issubclass(cls, BaseModel): + raise TypeError( + "PatchGenericPickle can only be used with subclasses of pydantic.BaseModel" + ) + if not issubclass(cls, Generic): # type: ignore[arg-type] + raise TypeError("PatchGenericPickle can only be used with Generic models") + + qualname = cls.__qualname__ + declaring_module = sys.modules[cls.__module__] + if qualname not in declaring_module.__dict__: + # This should work in all cases, but we might need to make this check and update more + # involved e.g. see pydantic._internal._generics.create_generic_submodel + declaring_module.__dict__[qualname] = cls diff --git a/ldp/graph/torch_ops.py b/ldp/graph/torch_ops.py new file mode 100644 index 00000000..05b56a28 --- /dev/null +++ b/ldp/graph/torch_ops.py @@ -0,0 +1,137 @@ +import inspect +from collections.abc import Mapping, Sequence +from typing import Any, ClassVar + +import torch +from torch import nn + +from ldp.graph.async_torch import async_protect_torch_call +from ldp.graph.op_utils import CallID, get_call_id, get_training_mode +from ldp.graph.ops import GradInType, Op, OpCtx, ResultOrValue + + +class TorchOp(Op[torch.Tensor]): + """An operation that wraps a PyTorch module.""" + + CTX_TENSOR_INPUT_KEY: ClassVar[str] = "tensor_input" + + def __init__(self, module: nn.Module): + super().__init__() + self.module = module + + # override forward args with the signature of the function + fwd_sig = inspect.signature(self.module.forward) + self._fwd_args = list(fwd_sig.parameters.values()) + + def __str__(self) -> str: + return f"{type(self).__name__} {type(self.module).__name__} ({id(self)})" + + async def forward(self, *args, **kwargs: Any) -> torch.Tensor: + tensor_args = [ + arg + if isinstance(arg, torch.Tensor) + else torch.tensor(arg, requires_grad=True) + for arg in args + ] + tensor_kwargs = { + k: v if isinstance(v, torch.Tensor) else torch.tensor(v, requires_grad=True) + for k, v in kwargs.items() + } + + is_training = get_training_mode() + + if is_training: + store_tensor_inputs( + ctx=self.ctx, + key=self.CTX_TENSOR_INPUT_KEY, + tensor_args=tensor_args, + tensor_kwargs=tensor_kwargs, + fwd_args=self._fwd_args, + ) + + return await async_protect_torch_call(self.module, no_grad=not is_training)( + *tensor_args, **tensor_kwargs + ) + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args: list[ResultOrValue], + input_kwargs: dict[str, ResultOrValue], + grad_output: float | torch.Tensor, + call_id: CallID, + ) -> GradInType: + tensor_args, tensor_kwargs = ctx.get(call_id, cls.CTX_TENSOR_INPUT_KEY) + n_pos_args = len(tensor_args) + output = ctx.get(call_id, "output").value + + if not isinstance(grad_output, torch.Tensor): + grad_output = torch.tensor( + grad_output, dtype=output.dtype, device=output.device + ) + + while grad_output.ndim < output.ndim: + # Assume we can broadcast, so expand dims + # e.g. 
if output.shape = (2, 1, 1) and grad_output is a scalar + # then we want to expand to (1, 1, 1) and then broadcast + grad_output = grad_output.unsqueeze(-1) + + gradients = torch.autograd.grad( + output, + [*tensor_args, *tensor_kwargs.values()], + grad_outputs=grad_output, + allow_unused=True, + retain_graph=True, + ) + + grad_args = [ + grad.detach().cpu().float() if grad is not None else None # type: ignore[redundant-expr] + for grad in gradients[:n_pos_args] + ] + grad_kwargs = { + k: grad.detach().cpu().float() + if grad is not None # type: ignore[redundant-expr] + else None + for k, grad in zip( + tensor_kwargs.keys(), gradients[n_pos_args:], strict=True + ) + } + + return grad_args, grad_kwargs + + +def store_tensor_inputs( + ctx: OpCtx, + key: str, + tensor_args: Sequence[torch.Tensor], + tensor_kwargs: Mapping[str, torch.Tensor], + fwd_args: Sequence[inspect.Parameter], + detach: bool = False, +) -> None: + call_id = get_call_id() + # Save tensor inputs for backward pass. Do not clobber "input", since + # that is needed for compute graph. Map positional args to kwargs + # Copying so we don't modify tensor_kwargs in-place + ctx_kwargs = tensor_kwargs.copy() # type: ignore[attr-defined] + + # See Op.__call__ for some notes on what this is doing. + for i_arg, (arg, param) in enumerate( + # strict=False b/c not all params in _fwd_args will be in args (i.e. defaults and **kwargs) + zip(tensor_args, fwd_args, strict=False) + ): + if param.kind == inspect.Parameter.VAR_POSITIONAL: + ctx_args = list(tensor_args[i_arg:]) + break + + # Normal positional arg + ctx_kwargs[param.name] = arg + else: + ctx_args = [] # if we got here, there were no *args + + if detach: + # Detach the tensors from the compute graph and move to CPU + ctx_args = [arg.detach().cpu() for arg in ctx_args] + ctx_kwargs = {k: v.detach().cpu() for k, v in ctx_kwargs.items()} + + ctx.update(call_id, key, (ctx_args, ctx_kwargs)) diff --git a/ldp/llms/__init__.py b/ldp/llms/__init__.py new file mode 100644 index 00000000..77807d23 --- /dev/null +++ b/ldp/llms/__init__.py @@ -0,0 +1,41 @@ +from .chat import ( + JSONSchemaValidationError, + LLMModel, + LLMResult, + MultipleCompletionLLMModel, + process_llm_config, + sum_logprobs, + validate_json_completion, +) +from .embeddings import ( + EmbeddingModel, + EmbeddingModes, + HybridEmbeddingModel, + LiteEmbeddingModel, + SparseEmbeddingModel, +) +from .prompts import ( + append_to_messages, + append_to_sys, + prepend_sys, + prepend_sys_and_append_sys, +) + +__all__ = [ + "EmbeddingModel", + "EmbeddingModes", + "HybridEmbeddingModel", + "JSONSchemaValidationError", + "LLMModel", + "LLMResult", + "LiteEmbeddingModel", + "MultipleCompletionLLMModel", + "SparseEmbeddingModel", + "append_to_messages", + "append_to_sys", + "prepend_sys", + "prepend_sys_and_append_sys", + "process_llm_config", + "sum_logprobs", + "validate_json_completion", +] diff --git a/ldp/llms/chat.py b/ldp/llms/chat.py new file mode 100644 index 00000000..12d64859 --- /dev/null +++ b/ldp/llms/chat.py @@ -0,0 +1,351 @@ +import asyncio +import json +from collections.abc import AsyncGenerator, Callable, Iterable +from datetime import datetime +from typing import Any, ClassVar, Self, cast +from uuid import UUID, uuid4 + +import litellm +from aviary.message import Message +from aviary.tools import Tool, ToolRequestMessage, ToolsAdapter +from aviary.utils import is_coroutine_callable +from pydantic import BaseModel, ConfigDict, Field, ValidationError, model_validator + + +class JSONSchemaValidationError(ValueError): + 
"""Raised when the completion does not match the specified schema.""" + + +class LLMResult(BaseModel): + """A class to hold the result of a LLM completion.""" + + id: UUID = Field(default_factory=uuid4) + config: dict | None = None + prompt: list[Message] | None = Field( + default=None, description="Messages sent to the LLM." + ) + messages: list[Message] | None = Field( + default=None, description="Messages received from the LLM." + ) + prompt_count: int = Field(default=0, description="Count of prompt tokens.") + completion_count: int = Field(default=0, description="Count of completion tokens.") + model: str + date: str = Field(default_factory=datetime.now().isoformat) + seconds_to_first_token: float | None = None + seconds_to_last_token: float = 0 + logprob: float | None = Field( + default=None, description="Sum of logprobs in the completion." + ) + system_fingerprint: str | None = Field( + default=None, description="System fingerprint received from the LLM." + ) + + @property + def prompt_and_completion_costs(self) -> tuple[float, float]: + """Get a two-tuple of prompt tokens cost and completion tokens cost, in USD.""" + return litellm.cost_per_token( + self.model, + prompt_tokens=self.prompt_count, + completion_tokens=self.completion_count, + ) + + @property + def provider(self) -> str: + """Get the model provider's name (e.g. "openai", "mistral").""" + return litellm.get_llm_provider(self.model)[1] + + def get_supported_openai_params(self) -> list[str] | None: + """Get the supported OpenAI parameters for the model.""" + return litellm.get_supported_openai_params(self.model) + + +def process_llm_config(llm_config: dict) -> dict: + """Remove model_type and try to set max_tokens.""" + result = llm_config.copy() + result.pop("model_type", None) + + if result.get("max_tokens", -1) == -1: # Either max_tokens is missing or it's -1 + model = llm_config["model"] + # these are estimates - should probably do something better in the future. + if model.startswith("gpt-4") or ( + model.startswith("gpt-3.5") and "0125" in model + ): + result["max_tokens"] = 4000 + elif "rrr" not in model: + result["max_tokens"] = 2500 + + return result + + +def sum_logprobs(choice: litellm.utils.Choices) -> float | None: + """Calculate the sum of the log probabilities of an LLM completion (a Choices object). + + Args: + choice: A sequence of choices from the completion. + + Returns: + The sum of the log probabilities of the choice. + """ + try: + logprob_obj = choice.logprobs + except AttributeError: + return None + if isinstance(logprob_obj, dict): + if logprob_obj.get("content"): + return sum( + logprob_info["logprob"] for logprob_info in logprob_obj["content"] + ) + elif choice.logprobs.content: + return sum(logprob_info.logprob for logprob_info in choice.logprobs.content) + return None + + +def validate_json_completion( + completion: litellm.ModelResponse, output_type: type[BaseModel] +) -> None: + """Validate a completion against a JSON schema. + + Args: + completion: The completion to validate. + output_type: The Pydantic model to validate the completion against. + """ + try: + for choice in completion.choices: + output_type.model_validate_json(choice.message.content or "") # type: ignore[union-attr] + except ValidationError as err: + raise JSONSchemaValidationError( + "The completion does not match the specified schema." 
+ ) from err + + +class MultipleCompletionLLMModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + # this should keep the original model + # if fine-tuned, this should still refer to the base model + name: str = "unknown" + config: dict = Field( + default={ + "model": "gpt-3.5-turbo", # Default model should have cheap input/output for testing + "temperature": 0.1, + } + ) + encoding: Any | None = None + + def __str__(self) -> str: + return f"{type(self).__name__} {self.name}" + + @model_validator(mode="after") + def set_model_name(self) -> Self: + if ( + self.config.get("model") in {"gpt-3.5-turbo", None} + and self.name != "unknown" + or self.name != "unknown" + and "model" not in self.config + ): + self.config["model"] = self.name + elif "model" in self.config and self.name == "unknown": + self.name = self.config["model"] + # note we do not consider case where both are set + # because that could be true if the model is fine-tuned + return self + + async def achat( + self, messages: Iterable[Message], **kwargs + ) -> litellm.ModelResponse: + return await litellm.acompletion( + messages=[m.model_dump(exclude_none=True, by_alias=True) for m in messages], + **(process_llm_config(self.config) | kwargs), + ) + + async def achat_iter(self, messages: Iterable[Message], **kwargs) -> AsyncGenerator: + return cast( + AsyncGenerator, + await litellm.acompletion( + messages=[ + m.model_dump(exclude_none=True, by_alias=True) for m in messages + ], + **(process_llm_config(self.config) | kwargs), + stream=True, + stream_options={ + "include_usage": True, # Included to get prompt token counts + }, + ), + ) + + # SEE: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice + # > `required` means the model must call one or more tools. + TOOL_CHOICE_REQUIRED: ClassVar[str] = "required" + + async def call( # noqa: C901, PLR0915 + self, + messages: list[Message], + callbacks: list[Callable] | None = None, + output_type: type[BaseModel] | None = None, + tools: list[Tool] | None = None, + tool_choice: Tool | str | None = TOOL_CHOICE_REQUIRED, + **chat_kwargs, + ) -> list[LLMResult]: + start_clock = asyncio.get_running_loop().time() + + # Deal with tools. 
OpenAI throws an error if tool list is empty, + # so skip this block if tools in (None, []) + if tools: + chat_kwargs["tools"] = ToolsAdapter.dump_python( + tools, exclude_none=True, by_alias=True + ) + if tool_choice is not None: + chat_kwargs["tool_choice"] = ( + { + "type": "function", + "function": {"name": tool_choice.info.name}, + } + if isinstance(tool_choice, Tool) + else tool_choice + ) + + # deal with specifying output type + if output_type is not None: + schema = json.dumps( + output_type.model_json_schema(mode="serialization"), indent=2 + ) + schema_msg = f"Respond following this JSON schema:\n\n{schema}" + # Get the system prompt and its index, or the index to add it + i, system_prompt = next( + ((i, m) for i, m in enumerate(messages) if m.role == "system"), + (0, None), + ) + messages = [ + *messages[:i], + system_prompt.append_text(schema_msg, inplace=False) + if system_prompt + else Message(role="system", content=schema_msg), + *messages[i + 1 if system_prompt else i :], + ] + chat_kwargs["response_format"] = {"type": "json_object"} + + # add static configuration to kwargs + chat_kwargs = process_llm_config(self.config) | chat_kwargs + n = chat_kwargs.get("n", 1) # number of completions + if n < 1: + raise ValueError("Number of completions (n) must be >= 1.") + + prompt = [ + m + if not isinstance(m, ToolRequestMessage) or m.tool_calls + # OpenAI doesn't allow for empty tool_calls lists, so downcast empty + # ToolRequestMessage to Message here + else Message(role=m.role, content=m.content) + for m in messages + ] + results: list[LLMResult] = [] + + if callbacks is None: + completion: litellm.ModelResponse = await self.achat(prompt, **chat_kwargs) + if output_type is not None: + validate_json_completion(completion, output_type) + + for choice in completion.choices: + if isinstance(choice, litellm.utils.StreamingChoices): + raise NotImplementedError("Streaming is not yet supported.") + + if ( + tools is not None # Allows for empty tools list + or choice.finish_reason == "tool_calls" + or (getattr(choice.message, "tool_calls", None) is not None) + ): + serialized_choice_message = choice.message.model_dump() + serialized_choice_message["tool_calls"] = ( + serialized_choice_message.get("tool_calls") or [] + ) + output_messages: list[Message | ToolRequestMessage] = [ + ToolRequestMessage(**serialized_choice_message) + ] + else: + output_messages = [Message(**choice.message.model_dump())] + + results.append( + LLMResult( + model=self.name, + config=chat_kwargs, + prompt=prompt, + messages=output_messages, + logprob=sum_logprobs(choice), + system_fingerprint=completion.system_fingerprint, + # Note that these counts are aggregated over all choices + completion_count=completion.usage.completion_tokens, # type: ignore[attr-defined,unused-ignore] + prompt_count=completion.usage.prompt_tokens, # type: ignore[attr-defined,unused-ignore] + ) + ) + else: + if tools: + raise NotImplementedError("Using tools with callbacks is not supported") + if n > 1: + raise NotImplementedError( + "Multiple completions with callbacks is not supported" + ) + result = LLMResult(model=self.name, config=chat_kwargs, prompt=prompt) + + sync_callbacks = [f for f in callbacks if not is_coroutine_callable(f)] + async_callbacks = [f for f in callbacks if is_coroutine_callable(f)] + stream_completion = await self.achat_iter(messages, **chat_kwargs) + text_result = [] + role = "assistant" + + async for chunk in stream_completion: + delta = chunk.choices[0].delta + role = delta.role or role + if delta.content: + s = 
delta.content
+                    if not result.seconds_to_first_token:
+                        result.seconds_to_first_token = (
+                            asyncio.get_running_loop().time() - start_clock
+                        )
+                    text_result.append(s)
+                    [await f(s) for f in async_callbacks]
+                    [f(s) for f in sync_callbacks]
+                if hasattr(chunk, "usage"):
+                    result.prompt_count = chunk.usage.prompt_tokens
+
+            output = "".join(text_result)
+            result.completion_count = litellm.token_counter(
+                model=self.name,
+                text=output,
+            )
+            # TODO: figure out how tools stream, and log probs
+            result.messages = [Message(role=role, content=output)]
+            results.append(result)
+
+        if not results:
+            # This happens in unit tests. We should probably not keep this block around
+            # long-term. Previously, we would emit an empty ToolRequestMessage if
+            # completion.choices were empty, so I am replicating that here.
+            results.append(
+                LLMResult(
+                    model=self.name,
+                    config=chat_kwargs,
+                    prompt=prompt,
+                    messages=[ToolRequestMessage(tool_calls=[])],
+                )
+            )
+
+        end_clock = asyncio.get_running_loop().time()
+
+        for result in results:
+            # Manually update the prompt count if not set, which can
+            # happen if the target model doesn't support 'include_usage'
+            if not result.prompt_count:
+                result.prompt_count = litellm.token_counter(
+                    model=self.name,
+                    messages=[m.model_dump() for m in result.messages],  # type: ignore[union-attr]
+                )
+
+            # Update the client-side timing of the last token
+            result.seconds_to_last_token = end_clock - start_clock
+
+        return results
+
+
+class LLMModel(MultipleCompletionLLMModel):
+    async def call(self, *args, **kwargs) -> LLMResult:  # type: ignore[override]
+        return (await super().call(*args, **kwargs))[0]
diff --git a/ldp/llms/embeddings.py b/ldp/llms/embeddings.py
new file mode 100644
index 00000000..d809948d
--- /dev/null
+++ b/ldp/llms/embeddings.py
@@ -0,0 +1,134 @@
+import asyncio
+from abc import ABC, abstractmethod
+from enum import StrEnum
+from typing import Any
+
+import litellm
+import numpy as np
+import tiktoken
+from pydantic import BaseModel, ConfigDict, Field, model_validator
+
+
+class EmbeddingModes(StrEnum):
+    """Enum representing the different modes of an embedding model."""
+
+    DOCUMENT = "document"
+    QUERY = "query"
+
+
+class EmbeddingModel(ABC, BaseModel):
+    name: str
+    dimensions: int | None = None
+
+    def set_mode(self, mode: EmbeddingModes) -> None:
+        """Several embedding models have a 'mode' or prompt which affects output."""
+
+    @abstractmethod
+    async def embed_texts(self, texts: list[str]) -> list[np.ndarray]:
+        pass
+
+    async def embed_text(self, text: str) -> np.ndarray:
+        return (await self.embed_texts([text]))[0]
+
+    @staticmethod
+    def from_name(embedding: str, **kwargs) -> "EmbeddingModel":
+        if embedding.startswith("hybrid"):
+            dense_model = LiteEmbeddingModel(name="-".join(embedding.split("-")[1:]))
+            return HybridEmbeddingModel(
+                name=embedding, models=[dense_model, SparseEmbeddingModel(**kwargs)]
+            )
+        if embedding == "sparse":
+            return SparseEmbeddingModel(**kwargs)
+        return LiteEmbeddingModel(name=embedding, **kwargs)
+
+
+class LiteEmbeddingModel(EmbeddingModel):
+    name: str = Field(default="text-embedding-3-small")
+    dimensions: int | None = Field(
+        default=None,
+        description=(
+            "The length an embedding will have. If left unspecified, we attempt to"
+            " infer an un-truncated length via LiteLLM's internal model map. If this"
+            " inference fails, the embedding will be un-truncated."
+ ), + ) + batch_size: int = 16 + embed_kwargs: dict[str, Any] = Field( + default_factory=dict, + description="Extra kwargs to pass to litellm.aembedding.", + ) + + @model_validator(mode="before") + @classmethod + def infer_dimensions(cls, data: dict[str, Any]) -> dict[str, Any]: + if data.get("dimensions") is not None: + return data + # Let's infer the dimensions + config: dict[str, dict[str, Any]] = litellm.get_model_cost_map( + url="https://raw.githubusercontent.com/BerriAI/litellm/main/litellm/model_prices_and_context_window_backup.json" + ) + output_vector_size: int | None = config.get(data.get("name", ""), {}).get( # noqa: FURB184 + "output_vector_size" + ) + if output_vector_size: + data["dimensions"] = output_vector_size + return data + + async def embed_texts(self, texts: list[str]) -> list[np.ndarray]: + embeddings = [] + # Before you get excited to asyncio.gather this: + # The point of this is to not hit the API rate limit + for i in range(0, len(texts), self.batch_size): + response = await litellm.aembedding( + model=self.name, + input=texts[i : i + self.batch_size], + encoding_format="float", + dimensions=self.dimensions, + **self.embed_kwargs, + ) + embeddings.extend([ + np.array(e["embedding"], dtype=np.float32) for e in response.data + ]) + return embeddings + + +class SparseEmbeddingModel(EmbeddingModel): + """This is a very simple keyword search model - probably best to be mixed with others.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + name: str = "sparse" + dimensions: int = 256 + enc: tiktoken.Encoding = Field( + default_factory=lambda: tiktoken.get_encoding("cl100k_base") + ) + + async def embed_texts(self, texts) -> list[np.ndarray]: + enc_batch = self.enc.encode_ordinary_batch(texts) + # now get frequency of each token rel to length + return [ + np.bincount( + [xi % self.dimensions for xi in x], minlength=self.dimensions + ).astype(np.float32) + / len(x) + for x in enc_batch + ] + + +class HybridEmbeddingModel(EmbeddingModel): + name: str = "hybrid-embed" + models: list[EmbeddingModel] + + @model_validator(mode="before") + @classmethod + def infer_dimensions(cls, data: dict[str, Any]) -> dict[str, Any]: + if data.get("dimensions") is not None: + raise ValueError(f"Don't specify dimensions to {cls.__name__}.") + if not data.get("models") or any(m.dimensions is None for m in data["models"]): + return data + data["dimensions"] = sum(m.dimensions for m in data["models"]) + return data + + async def embed_texts(self, texts): + all_embeds = await asyncio.gather(*[m.embed_texts(texts) for m in self.models]) + return np.concatenate(all_embeds, axis=1) diff --git a/ldp/llms/prompts.py b/ldp/llms/prompts.py new file mode 100644 index 00000000..3b03eb90 --- /dev/null +++ b/ldp/llms/prompts.py @@ -0,0 +1,100 @@ +"""This module provides utility functions for appending and prepending system messages.""" + +from collections.abc import Collection, Iterable + +from aviary.message import Message + + +def append_to_messages(messages: list[Message], new_message: Message) -> list[Message]: + """Appends a message to a list of messages, returning that in-place modified list. 
+
+    Examples:
+        >>> messages = [Message(content="Hello")]
+        >>> modified_messages = append_to_messages(messages, Message(content="New"))
+        >>> modified_messages
+        [Message(role='user', content='Hello'), Message(role='user', content='New')]
+        >>> id(messages) == id(modified_messages)
+        True
+    """
+    messages.append(new_message)
+    return messages
+
+
+def append_to_sys(user_content: str, sys_content: str | None = None) -> list[Message]:
+    """Creates a list containing a user message, optionally preceded by a system message.
+
+    Args:
+        user_content: The content of the user message.
+        sys_content: Optional content for the system message. Defaults to None.
+
+    Returns:
+        A list of messages including the optional system message and the user message.
+
+    Examples:
+        >>> append_to_sys("Hello, world!")
+        [Message(role='user', content='Hello, world!')]
+
+        >>> append_to_sys("Hello, world!", "System initialized.")
+        [Message(role='system', content='System initialized.'), Message(role='user', content='Hello, world!')]
+    """
+    sys = [Message(role="system", content=sys_content)] if sys_content else []
+    return [*sys, Message(content=user_content)]
+
+
+def prepend_sys(messages: Collection[Message], sys_content: str) -> list[Message]:
+    """Prepends a system message to a list of messages.
+
+    Args:
+        messages: The list of existing messages.
+        sys_content: The content of the system message to be prepended.
+
+    Returns:
+        A new list of messages with the system message prepended.
+
+    Examples:
+        >>> messages = [Message(role="user", content="Hello!")]
+        >>> prepend_sys(messages, "System initialized.")
+        [Message(role='system', content='System initialized.'), Message(role='user', content='Hello!')]
+    """
+    return [Message(role="system", content=sys_content), *messages]
+
+
+def indent_xml(xml_string: str, indent_size: int = 2) -> str:
+    output = []
+    indent_level = 0
+
+    # Split the input XML into parts by tags
+    parts = xml_string.replace(">", ">\n").replace("<", "\n<").split("\n")
+    parts = [part for part in parts if part.strip()]  # Remove empty parts
+
+    for part in parts:
+        if part.startswith("</"):
+            # Closing tag, decrease indent before appending
+            indent_level -= indent_size
+            output.append(" " * indent_level + part)
+        elif part.endswith("/>"):
+            # Self-closing tag, just append
+            output.append(" " * indent_level + part)
+        elif part.startswith("<") and ">" in part:
+            # Opening tag, maintain then increase indent
+            output.append(" " * indent_level + part)
+            indent_level += indent_size
+        else:
+            # Text or other data, maintain current indent
+            # Handle multiple lines within text nodes
+            text_lines = part.split("\n")
+            output.extend([
+                " " * indent_level + line.strip() for line in text_lines if line.strip()
+            ])
+
+    return "\n".join(output)
+
+
+def prepend_sys_and_append_sys(
+    messages: Iterable[Message], initial_sys_content: str, final_sys_content: str
+) -> list[Message]:
+    return [
+        Message(role="system", content=initial_sys_content),
+        *messages,
+        Message(role="system", content=final_sys_content),
+    ]
diff --git a/ldp/py.typed b/ldp/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 00000000..69341fd8
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1 @@
+test_outputs/
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..f405a0f2
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,8 @@
+from enum import StrEnum
+
+
+class CILLMModelNames(StrEnum):
+    """Models to use for generic CI testing."""
+
+    ANTHROPIC = "claude-3-haiku-20240307"  # Cheap and not Anthropic's cutting edge
+    OPENAI = "gpt-4o-mini-2024-07-18"  # Cheap and not OpenAI's cutting edge
diff --git
a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..33b54fb9 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,30 @@ +import os +import random + +import numpy as np +import pytest +import torch +from aviary.env import DummyEnv + +IN_GITHUB_ACTIONS: bool = os.getenv("GITHUB_ACTIONS") == "true" + + +@pytest.fixture(name="dummy_env") +def fixture_dummy_env() -> DummyEnv: + return DummyEnv() + + +def set_seed(seed: int | None) -> None: + if seed is None: + return + + random.seed(seed) + np.random.seed(seed) # noqa: NPY002 + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + + +@pytest.fixture(name="seed_zero") +def fixture_seed_zero() -> None: + """Set a 0 seed to minimize the chances of test flakiness.""" + set_seed(0) diff --git a/tests/test_agents.py b/tests/test_agents.py new file mode 100644 index 00000000..a0282a60 --- /dev/null +++ b/tests/test_agents.py @@ -0,0 +1,662 @@ +import contextlib +import shutil +import tempfile +from enum import IntEnum, auto +from functools import partial +from pathlib import Path +from unittest.mock import patch + +import networkx as nx +import pytest +from aviary.env import DummyEnv +from aviary.message import Message +from aviary.tools import Tool, ToolCall, ToolRequestMessage +from httpx import AsyncClient +from pydantic import BaseModel, Field + +from ldp.agent import ( + Agent, + AgentConfig, + DQNAgent, + HTTPAgentClient, + MemoryAgent, + ReActAgent, + SimpleAgent, + SimpleAgentState, + make_simple_agent_server, +) +from ldp.alg.algorithms import to_network +from ldp.graph.common_ops import LLMCallOp +from ldp.graph.gradient_estimators import llm_straight_through_estimator as llm_ste +from ldp.graph.gradient_estimators import straight_through_estimator as ste +from ldp.graph.memory import Memory +from ldp.graph.modules import ReActModule, ToolDescriptionMethods +from ldp.graph.op_utils import eval_mode +from ldp.graph.ops import OpResult +from ldp.llms import LLMModel + +from . import CILLMModelNames +from .conftest import IN_GITHUB_ACTIONS + +HERE = Path(__file__).parent + + +def intuitive_arg(x: str) -> float: # type: ignore[empty-body] + """Cast the input argument x to a float.""" + + +class StubState(BaseModel): + """Stub model docstring.""" + + defaulted_int: int = Field(default=1, description="A description of the int.") + required_str: str = Field(description="A description of the str.") + + +class StubEnum(IntEnum): + """Stub enum docstring.""" + + STUB1 = auto() + STUB2 = auto() + + +def many_edge_cases( + x: int, + y: None, + union: int | None, + pydantic_model: StubState, + basic_dict: dict[str, int], + complex_dict: dict[str, tuple[str, int]], + enum: StubEnum, + defaulted_str: str = "default", + defaulted_float: float = 1.0, +) -> None: + """ + Check using docstrings as partial f-string templates like so: {summary_format}. + + Args: + x: Yes, I end with a colon : + y: I am null. + And despite that there is a multiline argument description. + union: I am a union and the current year is {current_year}. + pydantic_model: I am a Pydantic model. + basic_dict: I am a dictionary with primitive values. + complex_dict: I am a dictionary with complex values. + enum: I am an enum. + defaulted_str: I have a string default value. + defaulted_float: I have a float default value. 
+    """
+
+
+class TestAgentState:
+    @pytest.mark.parametrize(
+        "agent",
+        [SimpleAgent(), MemoryAgent(), ReActAgent(), DQNAgent()],
+    )
+    @pytest.mark.asyncio
+    async def test_no_state_mutation(self, dummy_env: DummyEnv, agent: Agent) -> None:
+        obs, tools = await dummy_env.reset()
+
+        agent_state = agent_state_0 = await agent.init_state(tools=tools)
+        agent_state_0_json = agent_state_0.model_dump_json()
+        for _ in range(3):  # Give a few steps to finish, the assertion needs >0 steps
+            action, agent_state, _ = await agent.get_asv(agent_state, obs)
+            obs, reward, done, truncated = await dummy_env.step(action.value)
+            if done:
+                break
+
+        assert (
+            agent_state_0_json == agent_state_0.model_dump_json()
+        ), "Agent state should not be mutated between calls to get_asv"
+
+    def test_serialization_deserialization(self) -> None:
+        orig_state = SimpleAgentState(
+            messages=[Message(content="stub"), ToolRequestMessage(content="stub2")]
+        )
+        copied_state = SimpleAgentState(**orig_state.model_dump())
+        assert orig_state.messages == copied_state.messages
+        assert isinstance(copied_state.messages[1], ToolRequestMessage)
+
+
+class TestSimpleAgent:
+    @pytest.mark.parametrize(
+        "model_name", [CILLMModelNames.ANTHROPIC.value, CILLMModelNames.OPENAI.value]
+    )
+    @pytest.mark.asyncio
+    @pytest.mark.flaky(  # Rerun if LLM call does not return expected result
+        reruns=3, only_on=[AssertionError]
+    )
+    async def test_dummyenv(self, dummy_env: DummyEnv, model_name: str) -> None:
+        obs, tools = await dummy_env.reset()
+
+        agent = SimpleAgent(llm_model={"model": model_name, "temperature": 0.1})
+        agent_state = await agent.init_state(tools=tools)
+        action, agent_state, _ = await agent.get_asv(agent_state, obs)
+        obs, reward, done, truncated = await dummy_env.step(action.value)
+        assert (
+            reward > 0
+        ), "Reward should be positive, indicating agent called print_story tool"
+        assert done
+
+        # Check serialization after get_asv runs to ensure private
+        # Ops aren't included
+        assert agent.model_dump() == {
+            "llm_model": {"model": model_name, "temperature": 0.1},
+            "sys_prompt": None,
+        }
+
+        # Check we can get the LLM results to sum cost and count tokens
+        assert action.call_id is not None, "Compute graph not attached to action."
+        for op_r in action.traverse():
+            if issubclass(op_r.op_class, LLMCallOp):
+                # will raise if cannot retrieve result
+                op_r._get_from_ctx("result")
+                break
+        else:
+            raise RuntimeError("Could not find LLMCallOp in compute graph")
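The traversal pattern in the test above can also be phrased with traverse()'s filter_fn, which keeps walking the graph but only yields matching nodes. A sketch, assuming an `action` OpResult like the one in test_dummyenv:

```python
# Yield only LLM call results from the backward compute graph (topological order)
llm_results = action.traverse(
    filter_fn=lambda op_r: issubclass(op_r.op_class, LLMCallOp)
)
total_logprob = sum(op_r.logprob or 0.0 for op_r in llm_results)
```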
+    @pytest.mark.parametrize(
+        "model_name", [CILLMModelNames.ANTHROPIC.value, CILLMModelNames.OPENAI.value]
+    )
+    @pytest.mark.asyncio
+    @pytest.mark.flaky(  # Rerun if LLM call does not return expected result
+        reruns=3, only_on=[AssertionError]
+    )
+    async def test_agent_grad(self, dummy_env: DummyEnv, model_name: str) -> None:
+        obs, tools = await dummy_env.reset()
+
+        agent = SimpleAgent(llm_model={"model": model_name, "temperature": 0.1})
+        agent_state = await agent.init_state(tools=tools)
+        action, agent_state, _ = await agent.get_asv(agent_state, obs)
+        assert action.call_id is not None
+        obs, reward, done, _ = await dummy_env.step(action.value)
+        assert (
+            reward > 0
+        ), "Reward should be positive, indicating agent called print_story tool"
+        assert done
+        assert (
+            action.call_id is not None
+        ), "action is not associated with a forward pass call_id"
+
+        # NOTE: we would not normally pass reward as a gradient, but this is a way
+        # to check that gradients are flowing
+        action.compute_grads(
+            reward,
+            backward_fns={"_config_op": ste, "_llm_call_op": llm_ste},
+        )
+        _, g = action.ctx.get_input_grads(action.call_id)
+        assert isinstance(
+            g["config"], dict
+        ), "compute_grads() didn't descend into config dict"
+        assert all(g["config"].values()), "Gradient should be non-zero"
+
+        graph = to_network(action)
+        with (
+            tempfile.NamedTemporaryFile(mode="w", suffix=".png") as f,
+            contextlib.suppress(
+                FileNotFoundError  # Allow tests to run without graphviz on OS
+            ),
+        ):
+            nx.drawing.nx_pydot.to_pydot(graph).write_png(f.name)
+
+            if not IN_GITHUB_ACTIONS:
+                output_dir = HERE / "test_outputs"
+                output_dir.mkdir(exist_ok=True)
+                shutil.copy(
+                    f.name,
+                    output_dir / f"TestSimpleAgent.test_agent_grad.{model_name}.png",
+                )
+
+
+class TestMemoryAgent:
+    # On 5/14/2024, claude 3 opus would not follow its past memories
+    @pytest.mark.parametrize("model_name", [CILLMModelNames.OPENAI.value])
+    @pytest.mark.asyncio
+    @pytest.mark.flaky(  # Rerun if LLM call does not return expected result
+        reruns=3, only_on=[AssertionError]
+    )
+    async def test_dummyenv(self, dummy_env: DummyEnv, model_name: str) -> None:
+        obs, tools = await dummy_env.reset()
+
+        agent = MemoryAgent(llm_model={"model": model_name, "temperature": 0.1})
+        agent_state = await agent.init_state(tools=tools)
+
+        # access memory and add one to it
+        action = ToolRequestMessage(
+            content="Stories that start with 'Once there was' are always interesting.",
+            tool_calls=[
+                ToolCall.from_name(
+                    tools[0].info.name, story="Once there were was nothing."
+                )
+            ],
+        )
+        memory = agent._memory_op.memory_model
+        await memory.add_memory(
+            Memory(
+                query="Write a 5 word story and call print",
+                output=str(action),
+                value=1000.0,
+            )
+        )
+
+        new_action, agent_state, _ = await agent.get_asv(agent_state, obs)
+        assert "Once there was" in str(new_action)
+        obs, reward, done, truncated = await dummy_env.step(new_action.value)
+        assert (
+            reward > 0
+        ), "Reward should be positive, indicating agent called print_story tool"
+        assert done
+
+        # Check we can get the LLM results to sum cost and count tokens
+        assert new_action.call_id is not None, "Compute graph not attached to action."
+ for op_r in new_action.traverse(): + if issubclass(op_r.op_class, LLMCallOp): + # will raise if cannot retrieve result + op_r._get_from_ctx("result") + break + else: + raise RuntimeError("Could not find LLMCallOp in compute graph") + + @pytest.mark.asyncio + @pytest.mark.flaky( # Rerun if LLM call does not return expected result + reruns=3, only_on=[AssertionError] + ) + async def test_agent_grad(self, dummy_env: DummyEnv) -> None: + obs, tools = await dummy_env.reset() + + agent = MemoryAgent() + agent_state = await agent.init_state(tools=tools) + action, agent_state, _ = await agent.get_asv(agent_state, obs) + assert action.call_id is not None + obs, reward, done, truncated = await dummy_env.step(action.value) + assert ( + reward > 0 + ), "Reward should be positive, indicating agent called print_story tool" + assert done + assert ( + action.call_id is not None + ), "action is not associated with a forward pass call_id" + + # NOTE: we would not normally pass reward as a gradient, but this is a way + # to check that gradients are flowing + ste_ = partial(ste, descend=False) + action.compute_grads( + reward, + backward_fns={ + "_prompt_op": ste_, + "_package_op": ste_, + "_format_memory_op": ste_, + "_memory_op": ste_, + "_llm_call_op": llm_ste, + }, + ) + _, g = action.ctx.get_input_grads(action.call_id) + assert isinstance( + g["config"], dict + ), "compute_grads() didn't descend into config dict" + assert all(g["config"].values()), "Action gradient should be non-zero" + + memory_op = agent._memory_op + mem_call_ids = list(memory_op.get_call_ids({action.call_id.run_id})) + assert len(mem_call_ids) == 1, "MemoryOp should have been called exactly once" + _, g = memory_op.get_input_grads(mem_call_ids[0]) + assert any(g.values()), "Memory gradient should be non-zero" + + +class TestReActAgent: + @pytest.mark.parametrize( + "model_name", [CILLMModelNames.ANTHROPIC.value, "gpt-4-turbo"] + ) + @pytest.mark.asyncio + @pytest.mark.flaky( # Rerun if LLM call does not return expected result + reruns=3, only_on=[AssertionError] + ) + async def test_react_dummyenv(self, dummy_env: DummyEnv, model_name: str) -> None: + obs, tools = await dummy_env.reset() + agent = ReActAgent(llm_model={"model": model_name, "temperature": 0.1}) + agent_state = await agent.init_state(tools=tools) + action, agent_state, _ = await agent.get_asv(agent_state, obs) + obs, reward, done, truncated = await dummy_env.step(action.value) + assert ( + reward > 0 + ), "Reward should be positive, indicating agent called print_story tool" + assert done + + # Check we can get the LLM results to sum cost and count tokens + assert action.call_id is not None, "Compute graph not attached to action." + for op_r in action.traverse(): + if issubclass(op_r.op_class, LLMCallOp): + # will raise if cannot retrieve result + op_r._get_from_ctx("result") + break + else: + raise RuntimeError("Could not find LLMCallOp in compute graph") + + @pytest.mark.asyncio + async def test_multi_step(self, dummy_env: DummyEnv) -> None: + obs, tools = await dummy_env.reset() + obs = dummy_env.state.messages = [ + Message( + content=( + "Cast '5.5' to a float, then to an integer," + " and finally use it to write a story of that many words." 
+ ) + ) + ] + agent = ReActAgent() + agent_state = await agent.init_state(tools=tools) + for i in range(4): # noqa: B007 + action, agent_state, _ = await agent.get_asv(agent_state, obs) + for m in agent_state.messages: + assert m.content + assert ( + "Observation: Observation" not in m.content + ), "Prepended duplicate observations" + obs, _, done, _ = await dummy_env.step(action.value) + if done: + break + if i < 2 or not done: + raise AssertionError( + "Environment should have finished, with at least 2 environment steps." + ) + + def test_agent_op_naming(self) -> None: + agent = ReActAgent() + for op_name in ( + "prompt_op", + "package_msg_op", + "tool_select_module.config_op", + "tool_select_module.llm_call_op", + "tool_select_module.parse_msg_op", + ): + obj, expected = agent._react_module, f"_react_module.{op_name}" + if "." in op_name: + op, op_name = op_name.split(".", maxsplit=1) + obj = getattr(obj, op) + assert getattr(obj, op_name).name == expected + + @pytest.mark.parametrize( + "model_name", [CILLMModelNames.ANTHROPIC.value, "gpt-4-turbo"] + ) + @pytest.mark.asyncio + @pytest.mark.flaky( # Rerun if LLM call does not return expected result + reruns=3, only_on=[AssertionError] + ) + async def test_agent_grad(self, dummy_env: DummyEnv, model_name: str) -> None: + obs, tools = await dummy_env.reset() + + agent = ReActAgent(llm_model={"model": model_name, "temperature": 0.1}) + agent_state = await agent.init_state(tools=tools) + action, agent_state, _ = await agent.get_asv(agent_state, obs) + assert action.call_id is not None + obs, reward, done, truncated = await dummy_env.step(action.value) + assert ( + reward > 0 + ), "Reward should be positive, indicating agent called print_story tool" + assert done + assert ( + action.call_id is not None + ), "action is not associated with a forward pass call_id" + + # NOTE: we would not normally pass reward as a gradient, but this is a way + # to check that gradients are flowing + ste_ = partial(ste, descend=False) + action.compute_grads( + reward, + # Give everything a straight-through gradient approximation + # so we can confirm gradient flow + backward_fns={ + "_react_module.package_msg_op": ste_, + "_react_module.prompt_op": ste_, + "_react_module.tool_select_module.llm_call_op": llm_ste, + "_react_module.tool_select_module.parse_msg_op": ste_, + }, + ) + _, g = action.ctx.get_input_grads(action.call_id) + assert all(g.values()), "Gradient should be non-zero" + + # make sure it propagated far enough + prompt_op = agent._react_module.prompt_op + _, g = prompt_op.get_input_grads( + next(iter(prompt_op.get_call_ids({action.call_id.run_id}))) + ) + assert all(g.values()), "PromptOp gradients should be positive" + + graph = to_network(action, max_label_height=4, max_label_width=50) + with ( + tempfile.NamedTemporaryFile(mode="w", suffix=".png") as f, + contextlib.suppress( + FileNotFoundError # Allow tests to run without graphviz on OS + ), + ): + nx.drawing.nx_pydot.to_pydot(graph).write_png(f.name) + + if not IN_GITHUB_ACTIONS: + output_dir = HERE / "test_outputs" + output_dir.mkdir(exist_ok=True) + shutil.copy( + f.name, + output_dir / f"TestReActAgent.test_agent_grad.{model_name}.png", + ) + + @pytest.mark.parametrize( + ("description_method", "expected"), + [ + (ToolDescriptionMethods.STR, NotImplementedError), + ( + ToolDescriptionMethods.JSON, + ( + "Answer the following questions as best you can. 
You have access to" + " the following tools:\n\nTools are specified with a JSON" + ' schema.\n{"name":"many_edge_cases","description":"Check using' + " docstrings as partial f-string templates like so:" + ' {summary_format}.","parameters":{"type":"object","properties":{"x":{"description":"Yes,' + " I end with a colon" + ' :","title":"X","type":"integer"},"y":{"description":"I am null.' + " And despite that there is a multiline argument" + ' description.","title":"Y","type":"null"},"union":{"anyOf":[{"type":"integer"},{"type":"null"}],"description":"I' + " am a union and the current year is" + ' {current_year}.","title":"Union"},"pydantic_model":{"allOf":[{"$ref":"#/$defs/StubState"}],"description":"I' + " am a Pydantic" + ' model."},"basic_dict":{"additionalProperties":{"type":"integer"},"description":"I' + ' am a dictionary with primitive values.","title":"Basic' + ' Dict","type":"object"},"complex_dict":{"additionalProperties":{"maxItems":2,"minItems":2,"prefixItems":[{"type":"string"},{"type":"integer"}],"type":"array"},"description":"I' + ' am a dictionary with complex values.","title":"Complex' + ' Dict","type":"object"},"enum":{"allOf":[{"$ref":"#/$defs/StubEnum"}],"description":"I' + " am an" + ' enum."},"defaulted_str":{"default":"default","description":"I' + ' have a string default value.","title":"Defaulted' + ' Str","type":"string"},"defaulted_float":{"default":1.0,"description":"I' + ' have a float default value.","title":"Defaulted' + ' Float","type":"number"}},"required":["x","y","union","pydantic_model","basic_dict","complex_dict","enum"],"$defs":{"StubEnum":{"description":"Stub' + " enum" + ' docstring.","enum":[1,2],"title":"StubEnum","type":"integer"},"StubState":{"description":"Stub' + " model" + ' docstring.","properties":{"defaulted_int":{"default":1,"description":"A' + ' description of the int.","title":"Defaulted' + ' Int","type":"integer"},"required_str":{"description":"A' + ' description of the str.","title":"Required' + ' Str","type":"string"}},"required":["required_str"],"title":"StubState","type":"object"}}}}\n{"name":"intuitive_arg","description":"Cast' + " the input argument x to a" + ' float.","parameters":{"type":"object","properties":{"x":{"title":"X","type":"string"}},"required":["x"]}}\n\nUse' + " the following format:\n\nThought: you should always think about" + " what to do\nAction: the action to take, should be one of" + " [many_edge_cases, intuitive_arg]\nAction Input: comma separated" + " list of inputs to action as python tuple\nObservation: the result" + " of the action\n... (this Thought/Action/Action Input/Observation" + " can repeat N times)\n\nExample:\n\nThought: I need to use the" + ' get_weather tool\nAction: get_weather\nAction Input: "New York",' + " 7\nObservation: The 7 day forecast for New York is [...]" + ), + ), + ( + ToolDescriptionMethods.XML, + ( + "Answer the following questions as best you can. You have access to" + " the following tools:\n\nTools are specified with an XML" + " schema.\nmany_edge_casesCheck" + " using docstrings as partial f-string templates like so:" + " {summary_format}.objectYes," + " I end with a colon" + " :XintegerI" + " am null. 
And despite that there is a multiline argument" + " description.YnullintegernullI" + " am a union and the current year is" + " {current_year}.Union#/$defs/StubStateI' + " am a Pydantic" + " model.integerI" + " am a dictionary with primitive values.Basic" + " Dictobject22stringintegerarrayI" + " am a dictionary with complex values.Complex" + " Dictobject#/$defs/StubEnumI' + " am an" + " enum.defaultI" + " have a string default value.Defaulted" + " Strstring1.0I" + " have a float default value.Defaulted" + " Floatnumberxyunionpydantic_modelbasic_dictcomplex_dictenumStub enum' + " docstring.12StubEnumintegerStub" + " model" + " docstring.1A" + " description of the int.Defaulted" + " IntintegerA" + " description of the str.Required" + " Strstringrequired_strStubStateobject\nintuitive_argCast" + " the input argument x to a" + " float.objectXstringx\n\nUse" + " the following format:\n\nThought: you should always think about" + " what to do\nAction: the action to take, should be one of" + " [many_edge_cases, intuitive_arg]\nAction Input: comma separated" + " list of inputs to action as python tuple\nObservation: the result" + " of the action\n... (this Thought/Action/Action Input/Observation" + " can repeat N times)\n\nExample:\n\nThought: I need to use the" + ' get_weather tool\nAction: get_weather\nAction Input: "New York",' + " 7\nObservation: The 7 day forecast for New York is [...]" + ), + ), + ], + ) + @pytest.mark.asyncio + async def test_complex_system_prompt( + self, + description_method: ToolDescriptionMethods, + expected: str | type[Exception], + ) -> None: + tools = [ + Tool.from_function(many_edge_cases), + Tool.from_function(intuitive_arg, allow_empty_param_descriptions=True), + ] + user_msg = Message(content="Cast the string '5.6' to a float.") + with ( + patch.object(LLMModel, "achat") as mock_achat, + patch.object(ReActModule, "parse_message"), + ): + agent = ReActAgent(tool_description_method=description_method) + agent_state = await agent.init_state(tools=tools) + if not isinstance(expected, str): + with pytest.raises(expected): + await agent.get_asv(agent_state, obs=[user_msg]) + return + await agent.get_asv(agent_state, obs=[user_msg]) + mock_achat.assert_awaited_once() + assert mock_achat.await_args + assert mock_achat.await_args[0][0] == [ + Message(role="system", content=expected), + user_msg, + ] + + +class TestHTTPAgentClient: + @patch.dict("os.environ", {"AUTH_TOKEN": "stub"}) + @pytest.mark.asyncio + @pytest.mark.flaky( # Rerun if LLM call does not return expected result + reruns=3, only_on=[AssertionError] + ) + async def test_lifecycle(self, dummy_env: DummyEnv) -> None: + obs, tools = await dummy_env.reset() + # Let's turn the prompt to require multiple steps + obs = dummy_env.state.messages = [ + Message( + content=( + "Cast '5.5' to a float, then to an integer," + " and finally use it to write a story of that many words." 
+ ) + ) + ] + + remote_agent = SimpleAgent() + base_server_url = "http://testserver" + agent_client = HTTPAgentClient[SimpleAgentState]( + agent_state_type=SimpleAgentState, + server_url=base_server_url, + request_headers={"Authorization": "Bearer stub"}, + ) + async with AsyncClient( + app=make_simple_agent_server(agent=remote_agent), + base_url=base_server_url, + ) as async_client: + # NOTE: just directly hit the server, since info is not an Agent method + response = await async_client.get( + f"{base_server_url}/info", headers={"Authorization": "Bearer stub"} + ) + response.raise_for_status() + assert response.json()["agent_type"] == "SimpleAgent" + + with patch("httpx.AsyncClient.post", async_client.post): + agent_state_0 = await agent_client.init_state(tools=tools) + assert [t.info for t in agent_state_0.tools] == [t.info for t in tools] + + # NOTE: also check we can repeatedly call the get_asv + with patch("httpx.AsyncClient.post", async_client.post), eval_mode(): + await agent_client.get_asv(agent_state_0, obs) + await agent_client.get_asv(agent_state_0, obs) + with patch("httpx.AsyncClient.post", async_client.post), eval_mode(): + action, agent_state_1, vhat = await agent_client.get_asv( + agent_state_0, obs + ) + + assert isinstance(action, OpResult) + assert isinstance(action.value, ToolRequestMessage) + assert isinstance(agent_state_1, SimpleAgentState) + assert len(agent_state_1.messages) == 2 + assert isinstance(agent_state_1.messages[0], Message) + assert isinstance(agent_state_1.messages[1], ToolRequestMessage) + assert isinstance(vhat, float) + + # This makes an obs with ToolResponseMessage inside + obs, reward, done, _ = await dummy_env.step(action.value) + assert not done + + with patch("httpx.AsyncClient.post", async_client.post), eval_mode(): + # Check we can make a second sequential Agent decision without crashing + await agent_client.get_asv(agent_state_1, obs) + + +class TestDQNAgent: + @pytest.mark.parametrize( + "model_name", ["gpt-4-turbo", CILLMModelNames.ANTHROPIC.value] + ) + @pytest.mark.asyncio + @pytest.mark.flaky( # Rerun if LLM call does not return expected result + reruns=3, only_on=[AssertionError] + ) + async def test_dummyenv(self, dummy_env: DummyEnv, model_name: str) -> None: + obs, tools = await dummy_env.reset() + agent = DQNAgent(llm_strategy_model={"model": model_name, "temperature": 1.0}) + agent_state = await agent.init_state(tools) + action, agent_state, _ = await agent.get_asv(agent_state, obs) + obs, reward, done, truncated = await dummy_env.step(action.value) + assert ( + reward > 0 + ), "Reward should be positive, indicating agent called print_story tool" + assert done + + +@pytest.mark.parametrize("agent_cls", [SimpleAgent, DQNAgent, MemoryAgent, ReActAgent]) +def test_agent_config(agent_cls: type[Agent]): + config = AgentConfig(agent_type=agent_cls.__name__) + assert isinstance(hash(config), int), "AgentConfig should be hashable" + agent = config.construct_agent() + assert isinstance(agent, agent_cls) diff --git a/tests/test_algorithms.py b/tests/test_algorithms.py new file mode 100644 index 00000000..97cb753f --- /dev/null +++ b/tests/test_algorithms.py @@ -0,0 +1,34 @@ +import pytest +from aviary.env import DummyEnv + +from ldp.agent import SimpleAgent +from ldp.alg.algorithms import discounted_returns + + +@pytest.mark.asyncio +async def test_rollout_and_discounting(dummy_env: DummyEnv) -> None: + obs, tools = await dummy_env.reset() + + agent = SimpleAgent(tools=tools) + agent_state = await agent.init_state(tools=tools) + + 
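+    # Collect a short rollout: `done` starts as True below, so the first loop
+    # iteration resets the environment and re-initializes the agent state.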
observations = [] + actions = [] + rewards = [] + terms = [] + done = True + for i in range(3): # noqa: B007 + if done: + obs, _ = await dummy_env.reset() + agent_state = await agent.init_state(tools=tools) + + observations.append((obs, agent_state)) + action, agent_state, _ = await agent.get_asv(agent_state, obs) + obs, reward, done, _ = await dummy_env.step(action.value) + actions.append(action) + rewards.append(reward) + terms.append(done) + + print(terms) + d_returns = discounted_returns(rewards, terms, 0.5) + print(d_returns) diff --git a/tests/test_buffers.py b/tests/test_buffers.py new file mode 100644 index 00000000..05e33b20 --- /dev/null +++ b/tests/test_buffers.py @@ -0,0 +1,22 @@ +import pytest + +from ldp.alg.optimizer.replay_buffers import CircularReplayBuffer + + +def test_circular_buffer(): + buf = CircularReplayBuffer() + + samples = [{"state": 1, "action": 2, "reward": 3, "t": t} for t in range(5)] + buf += samples + buf.resize(3) # should eject t=0, 1 + assert {sample["t"] for sample in buf} == {2, 3, 4} + + # check we can iterate + next(buf.batched_iter(batch_size=3)) + + # add a bad sample + buf.append({}) + with pytest.raises( + RuntimeError, match="Found buffer element with inconsistent keys" + ): + next(buf.batched_iter(batch_size=4)) diff --git a/tests/test_context_managers.py b/tests/test_context_managers.py new file mode 100644 index 00000000..68b95937 --- /dev/null +++ b/tests/test_context_managers.py @@ -0,0 +1,138 @@ +import asyncio +import random +from uuid import UUID + +import pytest + +from ldp.graph.op_utils import ( + _RUN_ID, + CallID, + compute_graph, + eval_mode, + get_call_id, + get_run_id, + get_training_mode, + op_call, + set_training_mode, + train_mode, +) + + +@pytest.mark.asyncio +async def test_run_ids(): + # check that we don't create a new run ID if we're + # already in a context + async with compute_graph(): + run_id_1 = get_run_id() + async with compute_graph(): + run_id_2 = get_run_id() + assert ( + run_id_1 == run_id_2 + ), "Should not create a new run ID if already in a run context." + + # Check that after exiting the context, _RUN_ID is no longer set + with pytest.raises(LookupError): + _RUN_ID.get() + + # Now check that we don't clobber in coroutines + async def run_id_test() -> UUID: + async with compute_graph(): + # Wait randomly, giving time for other calls to potentially clobber + # this one. + await asyncio.sleep(random.uniform(0.1, 0.5)) + return get_run_id() + + run_ids = await asyncio.gather(*[run_id_test() for _ in range(10)]) + assert len(run_ids) == len( + set(run_ids) + ), "At least two compute graphs had the same run ID, which indicates a clobber." + + +@pytest.mark.asyncio +async def test_call_ids(): + # check that we don't create a new call ID if we're + # already in a context + async with compute_graph(), op_call(): + call_id_1 = get_call_id() + async with op_call(): + call_id_2 = get_call_id() + assert ( + call_id_1 == call_id_2 + ), "Should not create a new call ID if already in a call context." + + # Check we cannot create a call ID if not in a run context + with pytest.raises(RuntimeError, match=r".*not inside compute graph context.*"): + async with op_call(): + pass + + async def call_test() -> CallID: + async with op_call(): + # Wait randomly, giving time for other calls to potentially clobber + # this one. 
+ await asyncio.sleep(random.uniform(0.1, 0.5)) + return get_call_id() + + async with compute_graph(): + call_ids = await asyncio.gather(*[call_test() for _ in range(10)]) + assert len(call_ids) == len( + set(call_ids) + ), "At least two compute graphs had the same run ID, which indicates a clobber." + + +@pytest.mark.asyncio +async def test_training_mode(): + with eval_mode(): + assert not get_training_mode(), "Training mode was not set to False" + assert ( + get_training_mode() + ), "Training mode should have been reset to True after exiting context" + + set_training_mode(False) + with train_mode(): + assert get_training_mode(), "Training mode was not set to True" + assert ( + not get_training_mode() + ), "Training mode should have been reset to False after exiting context" + + # Put back to training for next round of tests + set_training_mode(True) + + async def training_mode_test(i: int): + train = i % 2 == 0 + set_training_mode(train) + # wait a random amount of time to give a chance for other coroutines to clobber + await asyncio.sleep(random.uniform(0.1, 0.5)) + + assert get_training_mode() == train, "Training mode was overwritten." + + await asyncio.gather(*[training_mode_test(i) for i in range(10)]) + # Make sure training_mode_test didn't change our training mode + assert get_training_mode() + + # Make sure nesting works + with train_mode(): + assert get_training_mode() + with eval_mode(): + assert not get_training_mode() + with train_mode(): + assert get_training_mode() + assert not get_training_mode() + assert get_training_mode() + + # Make sure modes are set correctly in coroutines + + @train_mode() + async def train_then_eval(): + assert get_training_mode() + await asyncio.sleep(0.0) + async with eval_mode(): + assert not get_training_mode() + + @eval_mode() + async def eval_then_train(): + assert not get_training_mode() + await asyncio.sleep(0.0) + async with train_mode(): + assert get_training_mode() + + await asyncio.gather(train_then_eval(), eval_then_train()) diff --git a/tests/test_embeddings.py b/tests/test_embeddings.py new file mode 100644 index 00000000..3b61e9d8 --- /dev/null +++ b/tests/test_embeddings.py @@ -0,0 +1,138 @@ +import asyncio +from unittest.mock import MagicMock, patch + +import litellm +import numpy as np +import pytest +from litellm.caching import Cache, InMemoryCache +from pytest_subtests import SubTests + +from ldp.llms import ( + EmbeddingModel, + HybridEmbeddingModel, + LiteEmbeddingModel, + SparseEmbeddingModel, +) + + +class TestLiteEmbeddingModel: + @pytest.mark.asyncio + async def test_embed_texts(self) -> None: + texts = ["Hello", "World"] + batch_size = 1 # NOTE: this affects the mock below + model = LiteEmbeddingModel(name="stub", batch_size=1) + with patch( + "litellm.aembedding", + autospec=True, + side_effect=[ + MagicMock(data=[{"embedding": [1.0, 2.0]}]), + MagicMock(data=[{"embedding": [3.0, 4.0]}]), + ], + ) as mock_aembedding: + embeddings = await model.embed_texts(texts) + + assert np.allclose(embeddings[0], [1.0, 2.0]) + assert np.allclose(embeddings[1], [3.0, 4.0]) + assert mock_aembedding.call_count == len(texts) / batch_size + + @pytest.mark.parametrize( + ("model_name", "expected_dimensions"), + [ + ("stub", None), + ("text-embedding-ada-002", 1536), + ("text-embedding-3-small", 1536), + ], + ) + def test_model_dimension_inference( + self, model_name: str, expected_dimensions: int | None + ) -> None: + assert LiteEmbeddingModel(name=model_name).dimensions == expected_dimensions + + @pytest.mark.asyncio + async def 
test_can_change_dimension(self) -> None: + """We run this one for real, because want to test end to end.""" + stub_texts = ["test1", "test2"] + + model = LiteEmbeddingModel(name="text-embedding-3-small") + assert model.dimensions == 1536 + + model = LiteEmbeddingModel(name="text-embedding-3-small", dimensions=8) + assert model.dimensions == 8 + etext1, etext2 = await model.embed_texts(stub_texts) + assert len(etext1) == len(etext2) == 8 + + @pytest.mark.asyncio + async def test_caching(self) -> None: + model = LiteEmbeddingModel( + name="text-embedding-3-small", dimensions=8, embed_kwargs={"caching": True} + ) + # Make sure there is no existing cache. + with patch("litellm.cache", None): + # now create a new cache + litellm.cache = Cache() + assert isinstance(litellm.cache.cache, InMemoryCache) + assert len(litellm.cache.cache.cache_dict) == 0 + + _ = await model.embed_texts(["test1"]) + # need to do this to see the data propagated to cache + await asyncio.sleep(0.0) + + # Check the cache entry was made + assert len(litellm.cache.cache.cache_dict) == 1 + + +@pytest.mark.asyncio +async def test_sparse_embedding_model(subtests: SubTests): + with subtests.test("1D sparse"): + ndim = 1 + expected_output = [[1.0], [1.0]] + + model = SparseEmbeddingModel(dimensions=ndim) + result = await model.embed_texts(["test1", "test2"]) + + assert result == expected_output + + with subtests.test("large sparse"): + ndim = 1024 + + model = SparseEmbeddingModel(dimensions=ndim) + result = await model.embed_texts(["hello test", "go hello"]) + + assert max(result[0]) == max(result[1]) == 0.5 + + with subtests.test("default sparse"): + model = SparseEmbeddingModel() + result = await model.embed_texts(["test1 hello", "test2 hello"]) + + assert pytest.approx(sum(result[0]), abs=1e-6) == pytest.approx( + sum(result[1]), abs=1e-6 + ) + + +@pytest.mark.asyncio +async def test_hybrid_embedding_model() -> None: + hybrid_model = HybridEmbeddingModel( + models=[LiteEmbeddingModel(), SparseEmbeddingModel()] + ) + + # Mock the embedded documents of Lite and Sparse models + with ( + patch.object(LiteEmbeddingModel, "embed_texts", return_value=[[1.0], [2.0]]), + patch.object(SparseEmbeddingModel, "embed_texts", return_value=[[3.0], [4.0]]), + ): + result = await hybrid_model.embed_texts(["hello", "world"]) + assert result.tolist() == [[1.0, 3.0], [2.0, 4.0]] + + +@pytest.mark.asyncio +async def test_class_constructor() -> None: + original_name = "hybrid-text-embedding-3-small" + model = EmbeddingModel.from_name(original_name) + assert isinstance(model, HybridEmbeddingModel) + assert model.name == original_name + dense_model, sparse_model = model.models + assert dense_model.name == "text-embedding-3-small" + assert dense_model.dimensions == 1536 + assert sparse_model.name == "sparse" + assert sparse_model.dimensions == 256 + assert model.dimensions == 1792 diff --git a/tests/test_envs.py b/tests/test_envs.py new file mode 100644 index 00000000..2cfa9950 --- /dev/null +++ b/tests/test_envs.py @@ -0,0 +1,154 @@ +from typing import ClassVar + +import litellm +import pytest +from aviary.env import DummyEnv, DummyEnvState +from aviary.message import Message +from aviary.tools import Tool, ToolCall, ToolRequestMessage, ToolResponseMessage + +from ldp.agent import SimpleAgent + +from . 
import CILLMModelNames + + +class ParallelizedDummyEnv(DummyEnv): + def __init__(self, right_hand_broken: bool = False): + super().__init__() + self.right_hand_broken = right_hand_broken + + RIGHT_HAND_BROKEN_MESSAGE: ClassVar[str] = "Right hand is broken." + + async def reset(self) -> tuple[list[Message], list[Tool]]: + def move_right_hand( + distance: int, # noqa: ARG001 + state: DummyEnvState, + ) -> None: + """ + Move your right hand forward or backward. + + Args: + distance: Integer distance to move (mm), where forward is positive. + state: Current state. + """ + if self.right_hand_broken: # Use this to test tool errors + raise RuntimeError(self.RIGHT_HAND_BROKEN_MESSAGE) + state.reward += 1 + + def move_left_hand( + distance: int, # noqa: ARG001 + state: DummyEnvState, + ) -> None: + """ + Move your left hand forward or backward. + + Args: + distance: Integer distance to move (mm), where forward is positive. + state: Current state. + """ + state.reward += 1 + + def smile_and_wave(state: DummyEnvState) -> None: + """ + Smile and wave. + + Args: + state: Current state. + """ + state.reward = 10 + state.done = True + + self.tools = [ + Tool.from_function(move_left_hand), + Tool.from_function(move_right_hand), + Tool.from_function(smile_and_wave), + ] + self.state = type(self).State( + messages=[ + Message( + role="user", + content=( + "You are the president of the United States of America." + " Please move both hands at the same time, and then smile" + " and wave." + ), + ) + ] + ) + return self.state.messages, self.tools + + +class TestParallelism: + @pytest.mark.asyncio + async def test_SimpleAgent_can_parallel_call(self) -> None: + env = ParallelizedDummyEnv() + obs, tools = await env.reset() + agent = SimpleAgent() + + # Check parallel tool calls + action, agent_state, _ = await agent.get_asv( + await agent.init_state(tools=tools), obs + ) + selected_tools: set[str] = {tc.function.name for tc in action.value.tool_calls} + assert ( + {"move_left_hand", "move_right_hand"} <= selected_tools + ), f"Agent should've chosen tools in parallel, but it chose {selected_tools}" + + @pytest.mark.parametrize( + "model_name", [CILLMModelNames.ANTHROPIC.value, "gpt-4-turbo"] + ) + @pytest.mark.asyncio + async def test_exec_tool_calls_handling(self, model_name: str) -> None: + env = ParallelizedDummyEnv(right_hand_broken=True) + obs, tools = await env.reset() + right_hand_tool = tools[1] + agent = SimpleAgent( + llm_model=SimpleAgent.model_fields["llm_model"].default + | {"model": model_name} + ) + agent_state = await agent.init_state(tools=tools) + + # 1. Let's DIY create a ToolRequestMessage for test determinism + request_msg = ToolRequestMessage( + content="stub", tool_calls=[ToolCall.from_tool(right_hand_tool, distance=5)] + ) + agent_state.messages.extend([*obs, request_msg]) + + # 2. Okay, our hand was broken, let's handle it DIY-style + try: + obs, *_ = await env.step(action=request_msg) + except RuntimeError as exc: + obs = [ + Message( + content=f"Failed to execute tools with message:\n{exc}", role="tool" + ) + ] + else: + raise AssertionError("Should have blown up per the test logic.") + + # 2. 
Well, it looks like both Anthropic and OpenAI don't like DIY-style + # (using a bare Message) because they expect a tool call ID and tool name + # APIConnectionError is for a LiteLLM bug: https://github.com/BerriAI/litellm/issues/4348 + # TODO: remove litellm.APIConnectionError catch after release of + # https://github.com/BerriAI/litellm/commit/5e893ed13e87bc1ff5cfa198bae6faec3ad4af05 + # (will happen when litellm>1.40.22) + with pytest.raises( + (litellm.BadRequestError, litellm.APIConnectionError), match="400" + ): + await agent.get_asv(agent_state, obs) + + # 3. Alright, let's check the agent doesn't blow up if we use a + # ToolResponseMessage as Anthropic and OpenAI expect + await agent.get_asv( + agent_state, + ToolResponseMessage.from_request(request_msg, contents=["null"]), # type: ignore[arg-type] + ) + + # 4. Now that we have confirmed that, let's make sure exec_tool_calls + # can automate this for us + obs = await env.exec_tool_calls( # type: ignore[assignment] + message=request_msg, state=env.state, handle_tool_exc=True + ) + (failure_tool_response,) = obs + assert isinstance(failure_tool_response, ToolResponseMessage) + assert env.RIGHT_HAND_BROKEN_MESSAGE in failure_tool_response.content + await agent.get_asv(agent_state, obs) diff --git a/tests/test_gradients.py b/tests/test_gradients.py new file mode 100644 index 00000000..51e93cf7 --- /dev/null +++ b/tests/test_gradients.py @@ -0,0 +1,379 @@ +import asyncio +import copy +import math +from collections.abc import Callable, Iterable +from functools import partial +from typing import Any, cast + +import numpy as np +import pytest +import tree + +from ldp.graph.common_ops import ConfigOp, FxnOp +from ldp.graph.gradient_estimators import ( + assign_constant_grads, + assign_default_grads, +) +from ldp.graph.op_utils import CallID, compute_graph +from ldp.graph.ops import GradInType, Op, OpCtx, OpResult, ResultOrValue + + +class PoissonSamplerOp(Op): + @staticmethod + def _probability(lam: float, k: int) -> float: + if k < 0: + # negative k can happen when taking a gradient + return 0.0 + return np.exp(-lam) * lam**k / math.factorial(k) + + async def forward(self, lam: float) -> int: + return np.random.poisson(max(0.01, lam)) # noqa: NPY002 + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args, + input_kwargs, + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + # This op has no internal parameters, so we just compute delta_{j,i} in section 4. 
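+        # Standard Poisson algebra behind the lines below:
+        # p(k) = exp(-lam) * lam^k / k!, so dp(k)/dlam = p(k-1) - p(k),
+        # and dln p(k)/dlam = p(k-1)/p(k) - 1 = k/lam - 1 (the score function).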
+ lam = max(0.01, input_kwargs["lam"]) + k = ctx.get(call_id, "output").value + + p_k = cls._probability(lam, k) + p_km1 = cls._probability(lam, k - 1) + + # dp(k)/dlam + grad_lam_p = p_km1 - p_k + + # d[lnp(k)]/dlam + grad_lam_lnp = grad_lam_p / p_k + + # define dk/dlam in expectation: dE[k]/dlam = dlam/dlam = 1 + grad_lam_k = 1.0 + + # delta_{j,i} + delta_lam = grad_lam_lnp + (grad_lam_k * cast(float, grad_output)) + + return [], {"lam": delta_lam} + + +class FloatParamOp(Op): + def __init__(self, init_param: float): + self.param = init_param + + async def forward(self) -> float: + return self.param + + @classmethod + def backward( + cls, ctx: OpCtx, input_args, input_kwargs, grad_output: Any, call_id: CallID + ) -> GradInType: + return [], {} + + +class SGDOptimizer: + def __init__(self, op: FloatParamOp, lr: float = 0.01, lr_decay: float = 1.0): + self.op = op + self.lr = lr + self.lr_decay = lr_decay + + self.accumulated_updates: list[float] = [] + + def aggregate(self, samples: Iterable[tuple[OpResult, float]]): + for result, reward in samples: + assert result.call_id is not None + call_ids = self.op.get_call_ids({result.call_id.run_id}) + + # These are delta_{k,j} in equation 20 + grads = [ + cast(float, g) + for g in ( + self.op.ctx.get(call_id, "grad_output") for call_id in call_ids + ) + if g is not None + ] + if not grads: + # this op call was pruned from the backwards call graph + continue + + # We assume FloatParamOp is deterministic, meaning grad_j lnp_j = 0 + # Furthermore, grad_j x_j = 1 (dlam/dlam), so Eq 20 reduces to R*sum_k(delta_{k,j}) + self.accumulated_updates.append(reward * sum(grads)) + + def update(self): + self.op.param += self.lr * cast(float, np.mean(self.accumulated_updates)) + self.accumulated_updates.clear() + self.lr *= self.lr_decay + + +@pytest.mark.parametrize("init_lam", [0.1, 20.0, 10.0]) +@pytest.mark.flaky(reruns=3, only_on=[AssertionError]) +@pytest.mark.asyncio +async def test_poisson_sgd(init_lam: float): + # This test optimizes the rate parameter of a Poisson distribution such that + # the expected value of the distribution is 10. 
+ target = 10 + + def reward_fn(k: int) -> float: + return -np.abs(k - target) + + lam = FloatParamOp(init_lam) + poisson = PoissonSamplerOp() + opt = SGDOptimizer(lam, lr=1.0, lr_decay=0.9) + + @compute_graph() + async def fwd() -> OpResult[int]: + return await poisson(await lam()) + + bsz = 4 + n_epochs = 20 + for _ in range(n_epochs): + samples = await asyncio.gather(*[fwd() for _ in range(bsz)]) + + training_batch: list[tuple[OpResult, float]] = [] + for k in samples: + k.compute_grads() + reward = reward_fn(k.value) + training_batch.append((k, reward)) + + opt.aggregate(training_batch) + opt.update() + + print(lam.param, opt.lr, np.mean([k.value for k in samples])) + + # noisy learning, so just make sure we went in the right direction + assert np.isclose(lam.param, target, atol=4.0) + + +def assign_constant_grads_alter_inputs( + _ctx: OpCtx, + input_args, + input_kwargs, + _grad_output: int, + _call_id: CallID, + grad_val: float = 0.0, + input_func: Callable = lambda x, y: (x, y), +) -> GradInType: + input_args, input_kwargs = copy.deepcopy(input_args), copy.deepcopy(input_kwargs) + input_args, input_kwargs = input_func(input_args, input_kwargs) + return assign_constant_grads(input_args, input_kwargs, grad_val) + + +@pytest.mark.asyncio +async def test_nested_dict_kwargs_grad_aggregation_success(): + """Tests gradient aggregation across two ops that return tree.Structure objects.""" + cfg = {"a": {"b": [1, 2, 3], "c": 4}} + config_op = ConfigOp(cfg) + + op1 = FxnOp[int](lambda _input_dict: 2) + op1.set_name("op1") + op2 = FxnOp[int](lambda _input_dict: 3) + op2.set_name("op2") + agg_op = FxnOp[int](lambda *args: sum(args)) # noqa: FURB111 + + backward1 = partial(assign_constant_grads_alter_inputs, grad_val=1.0) + backward2 = partial(assign_constant_grads_alter_inputs, grad_val=2.0) + + @compute_graph() + async def fwd() -> OpResult[int]: + config = await config_op() + a = await op1(config) + b = await op2(config) + return await agg_op(a, b) + + output = await fwd() + output.compute_grads(backward_fns={"op1": backward1, "op2": backward2}) + assert output.value == 5 + config_op_grad = output.inputs[0][0].inputs[1]["_input_dict"].grad + assert config_op_grad == {"a": {"b": [3.0, 3.0, 3.0], "c": 3.0}} + + +@pytest.mark.asyncio +async def test_nested_dict_kwargs_missing_inner_grad_aggregation_fail(): + """Tests that two ops that return different tree.Structure objects crash in grad aggregation.""" + cfg = {"a": {"b": [1, 2, 3], "c": 4}} + config_op = ConfigOp(cfg) + + op1 = FxnOp[int](lambda _input_dict: 3) + op2_missing_grad = FxnOp[int](lambda _input_dict: 4) + op2_missing_grad.set_name("op2_missing_grad") + agg_op = FxnOp[int](lambda *args: sum(args)) # noqa: FURB111 + + def backward_with_missing_leaf(input_args, input_kwargs): + del input_kwargs["_input_dict"]["a"]["c"] # missing gradient for this key + return input_args, input_kwargs + + backward_with_missing_leaf = partial( + assign_constant_grads_alter_inputs, input_func=backward_with_missing_leaf + ) + + @compute_graph() + async def fwd() -> OpResult[int]: + config = await config_op() + a = await op1(config) + b = await op2_missing_grad(config) + return await agg_op(a, b) + + output = await fwd() + with pytest.raises(ValueError, match="Mismatched gradient structures"): + output.compute_grads( + backward_fns={"op2_missing_grad": backward_with_missing_leaf} + ) + + +@pytest.mark.asyncio +async def test_nested_dict_kwargs_missing_or_extra_inner_grad_ok(): + """Tests that missing or extra gradients for a dict input are not failing.""" 
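+    # Contrast with the ConfigOp cases above: the dict here feeds a single op call
+    # directly, so there is no cross-consumer aggregation step where a missing or
+    # extra leaf gradient could conflict.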
+ _input_dict = {"a": {"b": [1, 2, 3], "c": 4}} + + op = FxnOp[int](lambda _input_dict: 4) + op.set_name("op") + agg_op = FxnOp[int](lambda *args: sum(args)) # noqa: FURB111 + + def backward_with_missing_leaf(input_args, input_kwargs): + del input_kwargs["_input_dict"]["a"]["c"] # missing gradient for this key + return input_args, input_kwargs + + backward_with_missing_leaf = partial( + assign_constant_grads_alter_inputs, input_func=backward_with_missing_leaf + ) + + @compute_graph() + async def fwd() -> OpResult[int]: + a = await op(_input_dict) + return await agg_op(a) + + output = await fwd() + output.compute_grads(backward_fns={"op": backward_with_missing_leaf}) + assert output.value == 4 + + def backward_with_extra_leaf(input_args, input_kwargs): + input_kwargs["_input_dict"]["a"]["d"] = 3 # add extra key + return input_args, input_kwargs + + backward_with_extra_leaf = partial( + assign_constant_grads_alter_inputs, input_func=backward_with_extra_leaf + ) + + output = await fwd() + output.compute_grads(backward_fns={"op": backward_with_extra_leaf}) + assert output.value == 4 + + +@pytest.mark.asyncio +async def test_args_missing_or_extra_grad(): + op = FxnOp[int](lambda x: x) + agg_op = FxnOp[int](lambda *args: sum(args)) # noqa: FURB111 + agg_op.set_name("agg_op") + + def backward_with_missing_arg(input_args, input_kwargs): + input_args = input_args[:-1] # remove last arg + return input_args, input_kwargs + + backward_with_missing_arg = partial( + assign_constant_grads_alter_inputs, input_func=backward_with_missing_arg + ) + + @compute_graph() + async def fwd() -> OpResult[int]: + a = await op(1) + b = await op(2) + return await agg_op(a, b) + + output = await fwd() + with pytest.raises(ValueError, match="argument 2 is shorter than argument 1"): + output.compute_grads(backward_fns={"agg_op": backward_with_missing_arg}) + + def backward_with_extra_arg(input_args, input_kwargs): + input_args += [3] # add extra arg + return input_args, input_kwargs + + backward_with_extra_arg = partial( + assign_constant_grads_alter_inputs, input_func=backward_with_extra_arg + ) + + output = await fwd() + with pytest.raises(ValueError, match="argument 2 is longer than argument 1"): + output.compute_grads(backward_fns={"agg_op": backward_with_extra_arg}) + + +@pytest.mark.asyncio +async def test_kwargs_missing_or_extra_grad(): + _input1 = 1 + _input2 = 2 + op = FxnOp[int](lambda _input1, _input2: _input1 + _input2) + op.set_name("op") + + def backward_with_missing_kwarg(input_args, input_kwargs): + del input_kwargs["_input1"] + return input_args, input_kwargs + + backward_with_missing_kwarg = partial( + assign_constant_grads_alter_inputs, input_func=backward_with_missing_kwarg + ) + + @compute_graph() + async def fwd() -> OpResult[int]: + return await op(_input1, _input2) + + output = await fwd() + with pytest.raises(ValueError, match="Mismatch between grads"): + output.compute_grads(backward_fns={"op": backward_with_missing_kwarg}) + + def backward_with_extra_kwarg(input_args, input_kwargs): + input_kwargs["_input3"] = 3 + return input_args, input_kwargs + + backward_with_extra_kwarg = partial( + assign_constant_grads_alter_inputs, input_func=backward_with_extra_kwarg + ) + + output = await fwd() + with pytest.raises(ValueError, match="Mismatch between grads"): + output.compute_grads(backward_fns={"op": backward_with_extra_kwarg}) + + +@pytest.mark.asyncio +async def test_assign_default_grads(): + input_args: list[ResultOrValue] = [1, 2] + input_kwargs: dict[str, ResultOrValue] = { + "a": {"b": 3, "c": 
4}, + "d": {"e": {"f": 5}}, + } + input_grad_args = [0.1] + input_grad_kwargs = {"a": {"b": 0.2}} + + expected_grad_args = [0.1, 7.0] + expected_grad_kwargs = { + "a": { + "b": 0.2, + "c": 7.0, + }, + "d": {"e": {"f": 7.0}}, + } + + input_grads = (input_grad_args, input_grad_kwargs) + output_grad_args, output_grad_kwargs = assign_default_grads( + input_grads, + input_args, + input_kwargs, + default_grad_val=7.0, + ) + + assert output_grad_args == expected_grad_args + assert output_grad_kwargs == expected_grad_kwargs + + +# test running 2 ops serially without calling @compute_graph +@pytest.mark.asyncio +async def test_serial_ops_diff_run_id(): + op1 = FxnOp[int](lambda x: x + 1) + op2 = FxnOp[int](lambda x: x * 2) + + result1 = await op1(1) + + with pytest.raises(RuntimeError, match="args and kwargs must have the same run_id"): + await op2(result1) diff --git a/tests/test_llms.py b/tests/test_llms.py new file mode 100644 index 00000000..747ff970 --- /dev/null +++ b/tests/test_llms.py @@ -0,0 +1,338 @@ +from typing import ClassVar +from unittest.mock import Mock + +import litellm +import numpy as np +import pytest +from aviary.env import DummyEnv +from aviary.message import Message +from aviary.tools import Tool, ToolRequestMessage +from pydantic import BaseModel, Field + +from ldp.llms import ( + JSONSchemaValidationError, + LLMModel, + LLMResult, + MultipleCompletionLLMModel, + validate_json_completion, +) + +from . import CILLMModelNames + + +def test_json_schema_validation() -> None: + # Invalid JSON + mock_completion1 = Mock() + mock_completion1.choices = [Mock()] + mock_completion1.choices[0].message.content = "not a json" + # Invalid schema + mock_completion2 = Mock() + mock_completion2.choices = [Mock()] + mock_completion2.choices[0].message.content = '{"name": "John", "age": "nan"}' + # Valid schema + mock_completion3 = Mock() + mock_completion3.choices = [Mock()] + mock_completion3.choices[0].message.content = '{"name": "John", "age": 30}' + + class DummyModel(BaseModel): + name: str + age: int + + with pytest.raises(JSONSchemaValidationError): + validate_json_completion(mock_completion1, DummyModel) + with pytest.raises(JSONSchemaValidationError): + validate_json_completion(mock_completion2, DummyModel) + validate_json_completion(mock_completion3, DummyModel) + + +@pytest.mark.parametrize( + "model_name", ["gpt-3.5-turbo", CILLMModelNames.ANTHROPIC.value] +) +@pytest.mark.asyncio +async def test_achat(model_name: str) -> None: + model = LLMModel(name=model_name) + response = await model.achat( + messages=[ + Message(content="What are three things I should do today?"), + ] + ) + + assert len(response.choices) == 1 + + # Check we can iterate through the response + async for chunk in await model.achat_iter( + messages=[ + Message(content="What are three things I should do today?"), + ] + ): + assert len(chunk.choices) == 1 + + +@pytest.mark.parametrize( + "model_name", [CILLMModelNames.OPENAI.value, CILLMModelNames.ANTHROPIC.value] +) +@pytest.mark.asyncio +async def test_tools(dummy_env: DummyEnv, model_name: str) -> None: + model = LLMModel(name=model_name) + messages = [ + Message(content="What are three things I should do today?"), + ] + + def get_todo_list(n: int) -> str: + """Get todo list for today. 
+ + Args: + n: number of items to return + """ + return "\n".join(["Go for a walk", "Read a book", "Call a friend"][:n]) + + tool = Tool.from_function(get_todo_list) + dummy_env.tools = [tool] + result = await model.call(messages, tools=dummy_env.tools) + assert result.completion_count > 0 + + # try specifying tool choice + result = await model.call(messages, tools=dummy_env.tools, tool_choice=tool) + assert result.completion_count > 0, "Tool choice failed to execute tool" + assert result.messages + (tool_request_message,) = result.messages + assert isinstance(tool_request_message, ToolRequestMessage) + + new_messages = await dummy_env.exec_tool_calls(tool_request_message) + (new_message,) = new_messages + assert new_message.content == "Go for a walk\nRead a book\nCall a friend" + assert new_message.tool_call_id == tool_request_message.tool_calls[0].id + + def get_todo_list_no_args() -> str: + """Get todo list for today.""" + return "\n".join(["Go for a walk", "Read a book", "Call a friend"]) + + tool = Tool.from_function(get_todo_list_no_args) + dummy_env.tools = [tool] + result = await model.call(messages, tools=dummy_env.tools) + assert result.completion_count > 0 + assert result.messages + (tool_request_message,) = result.messages + assert isinstance(tool_request_message, ToolRequestMessage) + + new_messages = await dummy_env.exec_tool_calls(tool_request_message) + (new_message,) = new_messages + assert new_message.content == "Go for a walk\nRead a book\nCall a friend" + assert new_message.tool_call_id == tool_request_message.tool_calls[0].id + + # ok now try with multiple functions + messages = [ + Message( + content=( + "What items will I have time to accomplish on my todo list today based" + " on my calendar?" + ) + ), + ] + + def get_calendar() -> str: + """Get text version of calendar for today.""" + return "9:00am Wake-up\n10:00pm Go to bed\n" + + tool2 = Tool.from_function(get_calendar) + dummy_env.tools = [tool, tool2] + result = await model.call(messages, tools=dummy_env.tools) + assert result.messages + (tool_request_message,) = result.messages + assert isinstance(tool_request_message, ToolRequestMessage) + new_messages = await dummy_env.exec_tool_calls(tool_request_message) + if model_name.startswith("claude"): + # Anthropic not always so smart + assert 1 <= len(new_messages) <= 2 + else: + assert len(new_messages) == 2 + + # ok now try continuation - I AM NOT SURE IF THIS IS VALID? 
+ # TODO: - supported on openai, but not litellm + # messages = messages + result.messages + new_messages + # result = await model.call(messages) + + +class DummyOutputSchema(BaseModel): + name: str + age: int + + +class TestMultipleCompletionLLMModel: + NUM_COMPLETIONS: ClassVar[int] = 2 + DEFAULT_CONFIG: ClassVar[dict] = {"n": NUM_COMPLETIONS} + MODEL_CLS: ClassVar[type[MultipleCompletionLLMModel]] = MultipleCompletionLLMModel + + async def call_model( + self, model: MultipleCompletionLLMModel, *args, **kwargs + ) -> list[LLMResult]: + return await model.call(*args, **kwargs) + + @pytest.mark.parametrize("model_name", ["gpt-3.5-turbo"]) + @pytest.mark.asyncio + async def test_model(self, model_name: str) -> None: + # Make model_name an arg so that TestLLMModel can parametrize it + # only testing OpenAI, as other APIs don't support n>1 + model = self.MODEL_CLS(name=model_name, config=self.DEFAULT_CONFIG) + messages = [ + Message(role="system", content="Respond with single words."), + Message(content="Hello, how are you?"), + ] + results = await self.call_model(model, messages) + assert len(results) == self.NUM_COMPLETIONS + + for result in results: + assert result.prompt_count > 0 + assert result.completion_count > 0 + prompt_cost, completion_cost = result.prompt_and_completion_costs + assert prompt_cost > 0 + assert completion_cost > 0 + assert result.logprob is None or result.logprob <= 0 + + @pytest.mark.parametrize( + "model_name", [CILLMModelNames.ANTHROPIC.value, "gpt-3.5-turbo"] + ) + @pytest.mark.asyncio + async def test_streaming(self, model_name: str) -> None: + model = self.MODEL_CLS(name=model_name, config=self.DEFAULT_CONFIG) + messages = [ + Message(role="system", content="Respond with single words."), + Message(content="Hello, how are you?"), + ] + + def callback(_) -> None: + return + + with pytest.raises( + NotImplementedError, + match="Multiple completions with callbacks is not supported", + ): + await self.call_model(model, messages, [callback]) + + @pytest.mark.asyncio + async def test_parameterizing_tool_from_arg_union(self) -> None: + def play(move: int | None) -> None: + """Play one turn by choosing a move. + + Args: + move: Choose an integer to lose, choose None to win. + """ + + results = await self.call_model( + self.MODEL_CLS(name="gpt-3.5-turbo", config=self.DEFAULT_CONFIG), + messages=[Message(content="Please win.")], + tools=[Tool.from_function(play)], + ) + assert len(results) == self.NUM_COMPLETIONS + for result in results: + assert result.messages + assert len(result.messages) == 1 + assert isinstance(result.messages[0], ToolRequestMessage) + assert result.messages[0].tool_calls + assert result.messages[0].tool_calls[0].function.arguments["move"] is None + + @pytest.mark.asyncio + @pytest.mark.flaky(reruns=3, only_on=[AssertionError]) + async def test_output_schema(self) -> None: + model = self.MODEL_CLS(name="gpt-3.5-turbo", config=self.DEFAULT_CONFIG) + messages = [ + Message( + content=( + "My name is Claude and I am 1 year old. What is my name and age?" 
+ ) + ), + ] + results = await self.call_model(model, messages, output_type=DummyOutputSchema) + assert len(results) == self.NUM_COMPLETIONS + for result in results: + assert result.messages + assert len(result.messages) == 1 + assert result.messages[0].content + DummyOutputSchema.model_validate_json(result.messages[0].content) + + @pytest.mark.parametrize("model_name", [CILLMModelNames.OPENAI.value]) + @pytest.mark.asyncio + @pytest.mark.flaky(reruns=3, only_on=[AssertionError]) + async def test_text_image_message(self, model_name: str) -> None: + model = self.MODEL_CLS(name=model_name, config=self.DEFAULT_CONFIG) + + # An RGB image of a red square + image = np.zeros((32, 32, 3), dtype=np.uint8) + image[:] = [255, 0, 0] # (255 red, 0 green, 0 blue) is maximum red in RGB + + results = await self.call_model( + model, + messages=[ + Message.create_message( + text="What color is this square? Respond only with the color name.", + image=image, + ) + ], + ) + assert len(results) == self.NUM_COMPLETIONS + for result in results: + assert ( + result.messages is not None + ), "Expected messages in result, but got None" + assert ( + result.messages[-1].content is not None + ), "Expected content in message, but got None" + assert "red" in result.messages[-1].content.lower() + + +class TestLLMModel(TestMultipleCompletionLLMModel): + NUM_COMPLETIONS: ClassVar[int] = 1 + DEFAULT_CONFIG: ClassVar[dict] = {} + MODEL_CLS: ClassVar[type[MultipleCompletionLLMModel]] = LLMModel + + async def call_model(self, model: LLMModel, *args, **kwargs) -> list[LLMResult]: # type: ignore[override] + return [await model.call(*args, **kwargs)] + + @pytest.mark.parametrize( + "model_name", [CILLMModelNames.ANTHROPIC.value, "gpt-3.5-turbo"] + ) + @pytest.mark.asyncio + async def test_model(self, model_name: str) -> None: + await super().test_model(model_name) + + @pytest.mark.parametrize( + "model_name", [CILLMModelNames.ANTHROPIC.value, "gpt-3.5-turbo"] + ) + @pytest.mark.asyncio + async def test_streaming(self, model_name: str) -> None: + model = LLMModel(name=model_name) + messages = [ + Message(role="system", content="Respond with single words."), + Message(content="Hello, how are you?"), + ] + content = [] + + def callback(s): + content.append(s) + + result = await model.call(messages, [callback]) + assert result.completion_count > 0 + assert content + + @pytest.mark.asyncio + async def test_output_type_rejected_validation(self) -> None: + class InstructionList(BaseModel): + instructions: list[str] = Field(description="list of instructions") + + model = LLMModel(name=CILLMModelNames.ANTHROPIC.value) + with pytest.raises( + litellm.APIError, match="anthropic does not support parameters" + ): + await model.call( + [Message(content="What are three things I should do today?")], + output_type=InstructionList, + ) + + @pytest.mark.parametrize( + "model_name", + [CILLMModelNames.ANTHROPIC.value, "gpt-4-turbo", CILLMModelNames.OPENAI.value], + ) + @pytest.mark.asyncio + @pytest.mark.flaky(reruns=3, only_on=[AssertionError]) + async def test_text_image_message(self, model_name: str) -> None: + await super().test_text_image_message(model_name) diff --git a/tests/test_memory.py b/tests/test_memory.py new file mode 100644 index 00000000..8f3061e0 --- /dev/null +++ b/tests/test_memory.py @@ -0,0 +1,47 @@ +import pytest + +from ldp.graph.memory import Memory, UIndexMemoryModel +from ldp.llms import EmbeddingModel + + +@pytest.fixture(name="sample_memory") +def fixture_sample_memory() -> Memory: + return Memory( + query="sample string 
representation", output="observation", value=42.0 + ) + + +@pytest.mark.asyncio +async def test_memory_model_initialization() -> None: + model = UIndexMemoryModel() + assert model.embedding_model is not None + model.model_dump() # Check we can serialize + + model_custom = UIndexMemoryModel( + embedding_model=EmbeddingModel.from_name("text-embedding-3-small") + ) + model_custom.model_dump() # Check we can serialize + + +@pytest.mark.asyncio +async def test_add_then_get_memory(sample_memory: Memory) -> None: + memory_model = UIndexMemoryModel( + embedding_model=EmbeddingModel.from_name("text-embedding-3-small") + ) + async with memory_model.safe_access_index() as index: + assert len(index) == 0, "Should have no memories" + await memory_model.add_memory(sample_memory) + async with memory_model.safe_access_index() as index: + assert len(index) == 1, "Should have one memory" + assert memory_model.memories[0] == sample_memory + result = await memory_model.get_memory("sample query", matches=1) + assert len(result) == 1 + assert result[0] == sample_memory + + +@pytest.mark.asyncio +async def test_initialization_without_embedding(): + model = UIndexMemoryModel() + assert isinstance( + model.embedding_model, EmbeddingModel + ), "Default embedding model should be set" diff --git a/tests/test_modules.py b/tests/test_modules.py new file mode 100644 index 00000000..5ef93ba5 --- /dev/null +++ b/tests/test_modules.py @@ -0,0 +1,454 @@ +from unittest.mock import Mock, patch + +import pytest +from aviary.env import DummyEnv +from aviary.message import Message +from aviary.tools import Tool, ToolRequestMessage +from pytest_subtests import SubTests + +from ldp.agent import ReActAgent +from ldp.graph.modules import ( + ReActModule, + ReflectModule, + ReflectModuleConfig, + parse_message, +) +from ldp.graph.ops import OpResult + +from . import CILLMModelNames + + +@pytest.mark.asyncio +async def test_reflect_module() -> None: + config = ReflectModuleConfig( + llm_model={"model": CILLMModelNames.ANTHROPIC.value, "temperature": 0} + ) # Lower temperature for more deterministic responses + reflect_module = ReflectModule(config) + context = "I am happy. How do I feel?" + response = "You are sad." + result = await reflect_module(context, response) + assert isinstance(result, OpResult) + assert len(result.value) > 0 + assert result.value != response + # Check both emotions to work around LLM not responding with "happy" + # For example: "It sounds like you are feeling joyful." + assert "happy" in result.value or "sad" not in result.value + + +@pytest.fixture(name="mock_tools") +def fixture_mock_tools() -> list[Tool]: + return [Mock(spec=Tool, name=f"Tool{i}") for i in range(3)] + + +class TestReActModule: + @pytest.mark.asyncio + async def test_templating(self, dummy_env: DummyEnv) -> None: + obs, tools = await dummy_env.reset() + module = ReActModule(ReActAgent.model_fields["llm_model"].default) + with patch( + "ldp.graph.common_ops.LLMCallOp.forward", + return_value=ToolRequestMessage( + role="assistant", + content=f"Action: {tools[0].info.name}\nAction Input: stub", + ), + ) as mock_forward: + await module(obs, tools=tools) + mock_forward.assert_awaited_once() + assert mock_forward.await_args + assert mock_forward.await_args[1]["msgs"][0] == Message( + role="system", + content=( + "Answer the following questions as best you can. 
You have access to the" + " following tools:\n\nNAME: print_story\n\nSYNOPSIS:\n " + " print_story(string story)\n\nDESCRIPTION:\n Print a" + " story.\n\nPARAMETERS:\n story (string): Story to print.\n\nNAME:" + " cast_float\n\nSYNOPSIS:\n cast_float(string" + " x)\n\nDESCRIPTION:\n Cast the input argument x to a" + " float.\n\nPARAMETERS:\n x (string): No description" + " provided.\n\nNAME: cast_int\n\nSYNOPSIS:\n " + " cast_int(number x)\n\nDESCRIPTION:\n Cast the input argument x to" + " an integer.\n\nPARAMETERS:\n x (number): No description" + " provided.\n\nUse the following format:\n\nThought: you should" + " always think about what to do\nAction: the action to take, should be" + " one of [print_story, cast_float, cast_int]\nAction Input: comma" + " separated list of inputs to action as python tuple\nObservation: the" + " result of the action\n... (this Thought/Action/Action" + " Input/Observation can repeat N times)\n\nExample:\n\nThought: I need" + ' to use the get_weather tool\nAction: get_weather\nAction Input: "New' + ' York", 7\nObservation: The 7 day forecast for New York is [...]' + ), + ) + + @pytest.mark.asyncio + async def test_react_parse(self, subtests: SubTests) -> None: # noqa: PLR0915, C901 + def zero_arg() -> int: + """A test function.""" + return 0 + + def one_arg(x: int) -> int: + """A test function. + + Args: + x: x + """ + return x + + def one_string_arg(x: str) -> str: + """A test function. + + Args: + x: x + """ + return x + + def one_alphanumeric_string_arg(x: str) -> str: + """A test function. + + Args: + x: x + """ + return x + + def two_arg(x: int, y: str) -> int: + """A test function. + + Args: + x: x + y: y + """ + return x + len(y) + + def one_optional_arg(x: int = 1) -> int: + """A test function. + + Args: + x: x with a default. + """ + return x + + def mixed_required_args(x: int, y: int = 1) -> int: + """A test function. + + Args: + x: x + y: y + """ + return x + y + + def two_string_args(x: str, y: str) -> str: + """A test function. + + Args: + x: x + y: y + """ + return x + y + + def three_string_args(x: str, y: str, z: str) -> str: + """A test function. + + Args: + x: x + y: y + z: z + """ + return x + y + z + + def mixed_string_int_args(x: str, y: int, z: str) -> str: + """A test function. + + Args: + x: x + y: y + z: z + """ + return (x + z) * y + + def complex_optionals( + x: str, y: str, a: int | None = None, b: int | None = None + ) -> str: + """A complex test function with optionals. + + Args: + x: x + y: y + a: a + b: b + """ + return x + y + str(a) + str(b) + + tools = [ + Tool.from_function(zero_arg), + Tool.from_function(one_arg), + Tool.from_function(one_string_arg), + Tool.from_function(one_alphanumeric_string_arg), + Tool.from_function(two_arg), + Tool.from_function(one_optional_arg), + Tool.from_function(mixed_required_args), + Tool.from_function(two_string_args), + Tool.from_function(three_string_args), + Tool.from_function(mixed_string_int_args), + Tool.from_function(complex_optionals), + ] + + with subtests.test("no args"): + message = Message( + content=""" + + Of course, I would be happy to follow your format. + + This will be fun! + + I cannot wait to begin!!! + + I had a thought? What if I don't follow it + + OK... 
I will: + + Thought: you should always think about what to do + Action: zero_arg + Action Input: + Observation: the result of the action + + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "zero_arg" + assert not tool_call.function.arguments + + with subtests.test("one arg"): + message = Message( + content=""" + + Of course, I would be happy to follow your format. + + Thought: + Action: one_arg + Action Input: 1 + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "one_arg" + assert tool_call.function.arguments == {"x": 1} + + with subtests.test("one string arg"): + message = Message( + content=""" + + Of course, I would be happy to follow your format. + + Thought: + Action: one_string_arg + Action Input: Bertrand Russell + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "one_string_arg" + assert tool_call.function.arguments == {"x": "Bertrand Russell"} + + with subtests.test("one alphanumeric string arg"): + message = Message( + content=""" + + Of course, I would be happy to follow your format. + + Thought: + Action: one_alphanumeric_string_arg + Action Input: R2D2 from Star Wars + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "one_alphanumeric_string_arg" + assert tool_call.function.arguments == {"x": "R2D2 from Star Wars"} + + with subtests.test("two args"): + message = Message( + content=""" + Hey, here's some code: + + ```js + console.log("hello world!"!) + ``` + + just thought you'd want to know!!!! + + Thought: A long thought + what do I do next??? + Oh right + Note: spacing below is part of test + Action: two_arg + Action Input: 1, 2 + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "two_arg" + assert tool_call.function.arguments == {"x": 1, "y": 2} + + with subtests.test("nested args"): + # check that parser can handle if the action input + # is itself already a tuple + message = Message( + content=""" + Hey, here's some code: + + ```js + console.log("hello world!"!) + ``` + + just thought you'd want to know!!!! + + Thought: A long thought + what do I do next??? + Oh right + Action:one_arg + Action Input: (1,) + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "one_arg" + assert tool_call.function.arguments == {"x": 1} + + with subtests.test("optional arg"): + message = Message( + content=""" + Thought: A long thought + what do I do next??? + Oh right + Action: one_optional_arg + Action Input: () + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "one_optional_arg" + assert not tool_call.function.arguments + + with subtests.test("mixed args"): + message = Message( + content=""" + Thought: A long thought + what do I do next??? + Oh right + Action: mixed_required_args + Action Input: (1,) + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "mixed_required_args" + assert tool_call.function.arguments == {"x": 1} + + with subtests.test("two mixed args"): + message = Message( + content=""" + Thought: A long thought + what do I do next??? 
+ Oh right + Action: mixed_required_args + Action Input: (1,2) + """ + ) + tool_call = parse_message(message, tools).tool_calls[0] + assert tool_call.function.name == "mixed_required_args" + assert tool_call.function.arguments == {"x": 1, "y": 2} + + with subtests.test("two string args"): + message = Message( + content=""" + Thought: A long thought + what do I do next??? + Oh right + Action: two_string_args + Action Input: (Bertrand Russell, Friedrich Nietzsche) + """ + ) + tool_request_message = parse_message(message, tools) + assert isinstance(tool_request_message, ToolRequestMessage) + assert tool_request_message.tool_calls[0].function.name == "two_string_args" + assert tool_request_message.tool_calls[0].function.arguments == { + "x": "Bertrand Russell", + "y": "Friedrich Nietzsche", + } + + with subtests.test("three string args"): + message = Message( + content=""" + Thought: A long thought + what do I do next??? + Oh right + Action: three_string_args + Action Input: (Bertrand Russell, Friedrich Nietzsche, Immanuel Kant) + """ + ) + tool_request_message = parse_message(message, tools) + assert isinstance(tool_request_message, ToolRequestMessage) + assert ( + tool_request_message.tool_calls[0].function.name == "three_string_args" + ) + assert tool_request_message.tool_calls[0].function.arguments == { + "x": "Bertrand Russell", + "y": "Friedrich Nietzsche", + "z": "Immanuel Kant", + } + + with subtests.test("mixed string and integer args"): + message = Message( + content=""" + Thought: A long thought + what do I do next??? + Oh right + Action: mixed_string_int_args + Action Input: (Bertrand Russell, 2, Immanuel Kant) + """ + ) + tool_request_message = parse_message(message, tools) + assert isinstance(tool_request_message, ToolRequestMessage) + assert ( + tool_request_message.tool_calls[0].function.name + == "mixed_string_int_args" + ) + assert tool_request_message.tool_calls[0].function.arguments == { + "x": "Bertrand Russell", + "y": 2, + "z": "Immanuel Kant", + } + + with subtests.test("Attempting to put it into python ticks"): + text = """ + ```python + Thought: seq-2f794d237c85 contains the DsRed gene, a variant of RFP. I need to extract this gene and clone it into an E. coli-compatible vector. First, I'll slice out the RFP gene. + + Action: complex_optionals + Action Input: ann-seq-5d7de2e1-f32a, EGE3L, 1697, 2372 + ```""" + + message = Message(content=text) + tool_request_message = parse_message(message, tools) + + with subtests.test("A bunch of actions still only calls one tool"): + text = """ + ```python + Thought: seq-2f794d237c85 contains the DsRed gene, a variant of RFP. I need to extract this gene and clone it into an E. coli-compatible vector. First, I'll slice out the RFP gene. 
+
+            Action: complex_optionals
+            Action Input: ann-seq-5d7de2e1-f32a, EGE3L, 1697, 2372
+
+            Action: complex_optionals
+            Action Input: ann-seq-5d7de2e1-f32a, EGE3L, 1697, 2372
+
+            Action: complex_optionals
+            Action Input: ann-seq-5d7de2e1-f32a, EGE3L, 1697, 2372
+            ```"""
+
+            message = Message(content=text)
+            tool_request_message = parse_message(message, tools)
+            assert len(tool_request_message.tool_calls) == 1
+
+        with subtests.test("a single integer arg"):
+            _, tools = await DummyEnv().reset()
+            test_message = Message(
+                role="assistant",
+                content="Thought: I think.\nAction: cast_int\nAction Input: (1.1)",
+            )
+            parsed_message = parse_message(test_message, tools)
+            assert isinstance(parsed_message, ToolRequestMessage)
+            assert parsed_message.tool_calls[0].function.name == "cast_int"
+            assert parsed_message.tool_calls[0].function.arguments == {"x": 1.1}
diff --git a/tests/test_ops.py b/tests/test_ops.py
new file mode 100644
index 00000000..ddb12fbc
--- /dev/null
+++ b/tests/test_ops.py
@@ -0,0 +1,338 @@
+import asyncio
+import random
+from typing import TypeVar, cast
+from uuid import UUID
+
+import litellm
+import pytest
+import tree
+from aviary.env import DummyEnv
+from aviary.message import Message
+from aviary.tools import Tool, ToolRequestMessage
+
+from ldp.graph.common_ops import ConfigOp, FxnOp, LLMCallOp, PromptOp
+from ldp.graph.gradient_estimators import straight_through_estimator as ste
+from ldp.graph.op_utils import (
+    CallID,
+    compute_graph,
+    eval_mode,
+    get_call_id,
+    get_run_id,
+    set_training_mode,
+)
+from ldp.graph.ops import GradInType, Op, OpCtx, OpResult, ResultOrValue, TOutput
+from ldp.llms import LLMModel, append_to_sys
+
+
+class StatefulFxnOp(FxnOp[TOutput]):
+    async def forward(self, *args, **kwargs) -> TOutput:
+        result = await super().forward(*args, **kwargs)
+        self.ctx.update(get_call_id(), "observed", value=True)
+        return result
+
+
+@pytest.mark.asyncio
+async def test_call_ids() -> None:
+    async def fxn(x) -> int:
+        await asyncio.sleep(x)
+        return int(x > 0)
+
+    op = StatefulFxnOp[int](fxn)
+    # In this test, we want to make sure that op calls can't interfere with
+    # each other's compute graphs. So we launch two tasks, where the second
+    # one should finish before the first, allowing for the possibility of a
+    # clobber.
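+    # fxn sleeps for x seconds, so with xs = [1, 0.5] the second call finishes
+    # first; a shared (non-context-local) call ID would then get overwritten.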
+ xs = [1, 0.5] + + async def call_op(x) -> tuple[OpResult, UUID]: + async with compute_graph(): + result = await op(x) + return result, get_run_id() + + results = await asyncio.gather(*[call_op(x) for x in xs]) + + for result, run_id in results: + assert run_id == result.call_id.run_id, "Inconsistent run IDs" + assert op.ctx.get(key="observed", call_id=result.call_id) is True + + +@pytest.mark.asyncio +async def test_FxnOp() -> None: + def fxn(x) -> int: + return int(x > 0) + + op = FxnOp(fxn) + async with compute_graph(): + op_result = await op(1) + assert op_result.value == 1 + assert op_result.logprob is None + + with pytest.raises(ValueError, match="No gradients"): + op.get_input_grads(op_result.call_id) + op_result.compute_grads(1.0, backward_fns={FxnOp: ste}) + assert op_result.grad == 1.0, "We didn't persist the starting gradient" + assert op.get_input_grads(op_result.call_id) == ([], {"x": 1}) + + +T = TypeVar("T") + + +@pytest.mark.parametrize( + "op_return", + [ + (1, int), + (1.0, float), + ("1", str), + (Message(content="1"), Message), + (ToolRequestMessage(content="1"), ToolRequestMessage), + ], +) +@pytest.mark.parametrize("training", [True, False]) +@pytest.mark.asyncio +async def test_opresult_typing(op_return: tuple[T, type[T]], training: bool) -> None: + op_return_value, op_return_type = op_return + op = FxnOp[op_return_type](lambda: op_return_value) # type: ignore[valid-type] + + set_training_mode(training) + async with compute_graph(): + op_result = await op() + + # Confirm that the OpResult's type matches the value's type + assert isinstance(op_result, OpResult[op_return_type]) # type: ignore[valid-type,misc] + + +class TestLLMCallOp: + @pytest.mark.asyncio + async def test_cost_tracking(self) -> None: + model_name = "gpt-3.5-turbo" + + class LLMCallingEnv(DummyEnv): + """Showing how environments can use LiteLLM to track their own costs.""" + + def __init__(self): + self.total_cost = 0.0 + + async def reset(self) -> tuple[list[Message], list[Tool]]: + async def generate_story() -> str: + """Generate a story.""" + response = litellm.completion( + model=model_name, + messages=[ + {"content": "Please write a 5 word story", "role": "user"} + ], + ) + self.total_cost += litellm.completion_cost(response) + return response.choices[0].message.content + + self.state = type(self).State( + messages=[Message(content="Generate a story")] + ) + self.tools = [Tool.from_function(generate_story)] + return self.state.messages, self.tools + + env = LLMCallingEnv() + obs, tools = await env.reset() + config = {"model": model_name, "temperature": 0.1} + llm_op = LLMCallOp() + + # Perform one step + async with compute_graph(): + op_result = cast( + OpResult[ToolRequestMessage], + await llm_op(config, msgs=obs, tools=tools), + ) + await env.exec_tool_calls(op_result.value) + + # LLMCallOp track cost using run context + result = llm_op.ctx.get(op_result.call_id, "result") + prompt_cost, completion_cost = result.prompt_and_completion_costs + assert prompt_cost > 0 + assert completion_cost > 0 + + # Environment tracks its internal costs + assert env.total_cost > 0 + + @pytest.mark.asyncio + async def test_empty_tools(self) -> None: + llm_call_op = LLMCallOp() + message_result = await llm_call_op( + LLMModel.model_fields["config"].default, + msgs=[Message(content="Hello")], + tools=[], + ) + assert isinstance(message_result.value, ToolRequestMessage) + assert not message_result.value.tool_calls + + +@pytest.mark.asyncio +async def test_simple_prompt_graph() -> None: + config = 
ConfigOp(config={"name": "hello"}) + prompt = PromptOp("Hello, {name}! You are {age} years old.") + + async with compute_graph(): + c = await config() + s = await prompt(c, age=20) + assert s.value == "Hello, hello! You are 20 years old." + + my_loss_grad = -2.0 + s.compute_grads(my_loss_grad, backward_fns={PromptOp: ste}) + grad = prompt.get_input_grads(s.call_id) + assert grad[1]["age"] == -2.0 + + +@pytest.mark.asyncio +async def test_llm_call_graph() -> None: + sys_prompt_op = PromptOp( + "Respond by first planning your actions, then write code, " + "inspect its effect, and reason about correctness" + ) + user_prompt_op = PromptOp("What is the result of this math equation: {equation}?") + + package_msg_op = FxnOp(append_to_sys) + config = { + "model": "gpt-3.5-turbo-0125", + "temperature": 0.1, + "logprobs": True, + "top_logprobs": 1, + } + config_op = ConfigOp(config=config) + + # Now forward pass + my_equation = "2 + 2" + async with compute_graph(): + sys_prompt = await sys_prompt_op() + user_prompt = await user_prompt_op(equation=my_equation) + package_msg = await package_msg_op(user_prompt, sys_prompt) + c = await config_op() + result = await (llm_op := LLMCallOp())(c, package_msg) + assert result.value is not None + assert len(result.value.content) > 10 # type: ignore[arg-type] + + output_grad = -2.0 # some grad accrued from result + result.compute_grads([output_grad]) + + # check some grads are present + _, g = llm_op.get_input_grads(result.call_id) + assert g["config"] == dict.fromkeys(config, 0.0) + assert g["msgs"] == 0.0 + + _, g = config_op.get_input_grads(c.call_id) + assert not g # config op has no inputs + + _, g = user_prompt_op.get_input_grads(user_prompt.call_id) + assert g["equation"] == 0 + + # get examples + assert llm_op.get_examples() + + # now inference pass + with eval_mode(): + async with compute_graph(): + sys_prompt = await sys_prompt_op() + user_prompt = await user_prompt_op(equation=my_equation) + package_msg = await package_msg_op(user_prompt, sys_prompt) + c = await config_op() + result = await (llm_op := LLMCallOp())(c, package_msg) + assert result.value.content is not None + assert len(result.value.content) > 10 + + +@pytest.mark.asyncio +async def test_nested_op(): + """Test that we can have a forward function that calls another Op.""" + inner_op_a = FxnOp(lambda x: x + 1) + inner_op_b = FxnOp(lambda x: x + 1) + + async def nested_op(x: ResultOrValue[float]) -> OpResult[float]: + async with compute_graph(): + x = await inner_op_a(x) + return await inner_op_b(x) + + result = await nested_op(1) + assert result.value == 3 + + input_args, input_kwargs = result.inputs + assert not input_args + assert input_kwargs + + result.compute_grads(1) + assert result.grad == 1 + + +class PickFirstOp(Op[int]): + async def forward(self, *args: int) -> int: + return args[0] + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args, + input_kwargs, + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + return [grad_output] + [None] * (len(input_args) - 1), {} + + +@pytest.mark.asyncio +async def test_multiple_op_calls(): + op_a = FxnOp[int](lambda x: x) + op_b = PickFirstOp() + + # compute graph: A -> B <- A + async with compute_graph(): + n_samples = 2 + samples = await asyncio.gather(*[op_a(i) for i in range(n_samples)]) + selected = await op_b(*samples) + assert len(samples) == n_samples + + run_id = get_run_id() + op_a_call_ids = op_a.get_call_ids({run_id}) + assert len(op_a_call_ids) == n_samples + assert selected.value == 0 + + 
selected.compute_grads(10.0, backward_fns={FxnOp: ste})
+    for call_id in op_a_call_ids:
+        call_idx = op_a.ctx.get(call_id, "output").value
+        if call_idx == 0:
+            # first call - grads should've been backpropagated
+            assert op_a.get_input_grads(call_id)[1]["x"] == 10.0
+        else:
+            # second call - compute graph should have been pruned by
+            # PickFirstOp
+            with pytest.raises(
+                ValueError, match=r"No gradients have been computed for .*"
+            ):
+                op_a.get_input_grads(call_id)
+
+
+@pytest.mark.asyncio
+async def test_branching_compute_graph():
+    # The goal of this test is to make sure that gradients are properly aggregated
+    # when the output of an op is consumed by multiple downstream ops. We expect
+    # the gradients from each downstream op to be summed together.
+
+    op_a = FxnOp[int](lambda x: x)
+    op_b: FxnOp[int] = FxnOp(lambda x: x + random.randint(1, 10))
+    op_c = FxnOp[int](lambda *x: sum(x))  # noqa: FURB111
+
+    # compute graph: a -> b1, b2 -> c
+    async with compute_graph():
+        a = await op_a(3)
+        b1 = await op_b(a)
+        b2 = await op_b(a)
+        c = await op_c(b1, b2)
+
+    loss_grad = -5.0
+    c.compute_grads(loss_grad, backward_fns={FxnOp: ste})
+
+    # b1, b2, and c each receive only one copy of the gradient
+    for result in (c, b1, b2):
+        assert result.grad == loss_grad
+
+    # since a is used by two op calls, it should receive 2x the gradient:
+    assert a.grad == loss_grad * 2
+
+    # and 2x gradient should be passed back to the input
+    assert op_a.get_input_grads(a.call_id)[1]["x"] == loss_grad * 2
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
new file mode 100644
index 00000000..50521f8f
--- /dev/null
+++ b/tests/test_optimizer.py
@@ -0,0 +1,661 @@
+from typing import Any
+
+import litellm
+import pytest
+import tenacity
+import tree
+from aviary.env import DummyEnv
+from litellm.caching import Cache
+from torch import nn
+
+from ldp.agent import Agent, DQNAgent, MemoryAgent, ReActAgent
+from ldp.alg.optimizer import (
+    MemoryOpt,
+    Optimizer,
+    default_optimizer_factory,
+)
+from ldp.alg.optimizer.ape import APEOpt, APEScoreFn, Example
+from ldp.alg.optimizer.dqn import DQNOptimizer, DQNOptimizerConfig, DQNTarget
+from ldp.alg.optimizer.openai_sft_optimizer import OpenAISFTOpt, OpenAISFTOptConfig
+from ldp.alg.rollout import RolloutManager
+from ldp.data_structures import Trajectory, Transition
+from ldp.graph.common_ops import FxnOp, LLMCallOp, MemoryOp, PromptOp
+from ldp.graph.gradient_estimators import (
+    llm_straight_through_estimator as llm_ste,
+)
+from ldp.graph.gradient_estimators import (
+    straight_through_estimator as ste,
+)
+from ldp.graph.memory import Memory
+from ldp.graph.modules import EmbeddingDQNOp
+from ldp.graph.op_utils import CallID, compute_graph, eval_mode
+from ldp.graph.ops import GradInType, Op, OpCtx, OpResult
+from ldp.llms import LLMModel, append_to_sys
+
+from .
import CILLMModelNames +from .conftest import IN_GITHUB_ACTIONS + + +@pytest.mark.parametrize( + ("agent_cls", "optimizer_cls", "optimizer_kwargs"), + [ + (DQNAgent, DQNOptimizer, {}), + (MemoryAgent, MemoryOpt, {}), + (ReActAgent, APEOpt, {"score_fn": APEScoreFn.GRADIENT}), + ], +) +def test_optimizer_factory( + agent_cls: type[Agent], optimizer_cls: type[Optimizer], optimizer_kwargs: dict +): + agent = agent_cls() + opt = default_optimizer_factory(agent, optimizer_cls, **optimizer_kwargs) + assert isinstance(opt, optimizer_cls) + + +class TestDQNOptimizer: + @pytest.mark.asyncio + async def test_update(self) -> None: + dqn = EmbeddingDQNOp(num_layers=1) + assert isinstance(dqn.network, nn.Linear) + for net in (dqn.network, dqn.target_network): + net.weight.data.fill_(0.0) + net.weight.requires_grad = False + net.bias.data.fill_(0.0) + + with eval_mode(): + assert (await dqn("hello")).value == 0.0 + + agent = DQNAgent(num_actions_to_sample=2, dqn=dqn) + + tau = 0.5 + opt = DQNOptimizer.from_agent( + agent, + config=DQNOptimizerConfig( + lr=0.1, + batch_size=4, + train_buffer_size=18, + val_buffer_size=2, + soft_update_tau=tau, + ), + ) + + # Make sure the network is getting swapped out + with dqn.use_target_network(): + assert dqn.async_network.module is dqn.target_network + + # See if the update propagates correctly + dqn.network.bias.data.fill_(1.0) + opt._update_target_network() + assert (dqn.target_network.bias == 0.5).all() + + # Make sure we are getting the updated target network in the forward pass + with dqn.use_target_network(), eval_mode(): + assert (await dqn("hello")).value == 0.5 + + # Make sure the policy network didn't change in the update + with eval_mode(): + assert (await dqn("hello")).value == 1.0 + + # Reset our Qs + dqn.network.bias.data.fill_(0.0) + dqn.target_network.bias.data.fill_(0.0) + + # Ok, now let's run a full training iteration and confirm that things move in the + # right direction + rollout = RolloutManager(agent) + + while True: # Do-while on failed trajectory + traj, *_ = ( + await rollout.sample_trajectories( + environment_factory=lambda: DummyEnv(end_immediately=False), + max_steps=2, + ) + )[0] # batch size defaults to 1 + if not traj.failed: + # Sometimes the agent will crash DummyEnv, so check it didn't fail. + # TODO: don't use RolloutManager for this simple test; just manually + # construct a dummy trajectory + break + + assert len(traj.steps) == 2 + traj.steps[0].reward = 0.0 + traj.steps[1].reward = 1.0 + traj.steps[1].truncated = False + traj.steps[1].done = True + + for step in traj.steps: + assert step.action + step.action.compute_grads() + + # add a lot of data to the training buffer + opt.aggregate([traj] * 10) + # Here's what should happen: + # - Q^target should always return 0, so: + # - in the terminal state, target = r=1 + # - in the other state, target = r+gamma*Q^target=0 + # - The policy network bias should go towards 0.5 (avg of 0, 1). + # The weight should stay at 0 (no grad). 
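+        #   (the weight's requires_grad was set to False above, so the optimizer never moves it)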
+ # - _update_target_network() runs after the optimizer updates, so the target network + # should be at tau*policy + (1-tau)*target = 0.5 + await opt.update() + + bias = dqn.network.bias.item() + assert bias == pytest.approx(0.5, abs=0.25) + assert (dqn.network.weight == 0.0).all() + assert dqn.target_network.bias.item() == pytest.approx(tau * bias, abs=0.001) + assert (dqn.target_network.weight == 0.0).all() + + @pytest.mark.parametrize( + "dqn_target", [DQNTarget.Q, DQNTarget.SARSA, DQNTarget.MC_SARSA] + ) + @pytest.mark.asyncio + @pytest.mark.usefixtures("seed_zero") + async def test_convergence(self, dqn_target: DQNTarget) -> None: + # going to make a lot of embedding calls, so create a cache + litellm.cache = Cache() + + agent = DQNAgent(num_actions_to_sample=2) + opt = DQNOptimizer.from_agent( + agent, + config=DQNOptimizerConfig( + lr=0.01, + batch_size=8, + train_buffer_size=18, + val_buffer_size=2, + soft_update_tau=1.0, + target=dqn_target, + ), + ) + + rollout = RolloutManager(agent) + + results: list[tuple[Trajectory, Any]] = await rollout.sample_trajectories( + environment_factory=lambda: DummyEnv(end_immediately=False), + max_steps=2, + batch_size=6, + ) + + for traj, _ in results: + if traj.failed: + continue + + assert len(traj.steps) == 2 + traj.steps[0].reward = 0.0 + traj.steps[1].reward = 1.0 + traj.steps[1].truncated = False + traj.steps[1].done = True + + for step in traj.steps: + assert step.action + step.action.compute_grads() + + # Add extra data to ensure convergence + opt.aggregate([traj] * 4) + await opt.update() + + obs, tools = await DummyEnv(end_immediately=False).reset() + agent_state = await agent.init_state(tools) + with eval_mode(): + _, _, q = await agent.get_asv(agent_state, obs) + + assert abs(q - 1) < 0.2, "Expected Q-value to be close to 1 after training" + + +class SquaredErrorLoss(Op[int]): + async def forward(self, y: str, yhat: str) -> int: + try: + return (int(y) - int(yhat)) ** 2 + except ValueError: # For example, yhat may be "I guess the number is 7." + return 100 + + @classmethod + def backward( + cls, + ctx: OpCtx, + input_args, + input_kwargs, + grad_output: tree.Structure, + call_id: CallID, + ) -> GradInType: + try: + y = int(input_kwargs["y"]) + yhat = int(input_kwargs["yhat"]) + except ValueError: + loss = ctx.get(call_id, "output").value + return [], {"y": loss, "yhat": loss} # Straight-through approximation + # d/dy of (y - y^)^2 = 2 (y - y^), and d/dy^ of (y - y^)^2 = -2 (y - y^) + # return dL/dy, dL/dy^ + # Note that grad_output is ignored because this is assumed to be a terminal scalar, + # much like calling loss.backward() in pytorch. 
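+        # Worked example: y=3, yhat=5 gives loss = 4, dL/dy = 2 * (3 - 5) = -4, dL/dyhat = +4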
+ return [], { + "y": 2 * (y - yhat), + "yhat": -2 * (y - yhat), + } + + +@pytest.mark.asyncio +async def test_ape_optimizer() -> None: + sys_prompt_op = PromptOp("Guess a number based on the input word.") + package_msg_op = FxnOp(append_to_sys) + llm = LLMModel() + llm.config["max_retries"] = 3 # we seem to be hitting rate limits frequently + llm_call_op = LLMCallOp() + strip_op = FxnOp(lambda x: x.content) + loss_op = SquaredErrorLoss() + + @compute_graph() + async def forward(xi_: str, yi_: str) -> OpResult[int]: + """Perform a forward pass through the model to the resultant SE loss.""" + s = await sys_prompt_op() + m = await package_msg_op(xi_, s) + c = await llm_call_op(llm.config, m) + yh = await strip_op(c) + return await loss_op(yi_, yh) + + # Sequentially run a forward pass for each (x, y) + x = ["Hello", "Day", "Bar"] + y = [str(len(xi)) for xi in x] # Number to guess should be word's length + opt = APEOpt( + llm=llm, + llm_call_op=llm_call_op, + prompt_op=sys_prompt_op, + good_examples=[ + Example(input=x, output=y, score=0) for x, y in zip(x, y, strict=True) + ], + score_fn=APEScoreFn.GRADIENT, + ) + assert opt.trace == [sys_prompt_op.prompt] + + trajectory = Trajectory() + for i, (xi, yi) in enumerate(zip(x, y, strict=True)): + loss = await forward(xi, yi) + if i == 0: + assert loss.value > 0, ( + "First example's loss should be non-zero - otherwise, no learning" + " signal." + ) + # Sets grad_output and grad_input in context, to be used by optimizer + loss.compute_grads(backward_fns={LLMCallOp: llm_ste, FxnOp: ste}) + + # APE in gradient mode is only going to pay attention to the action, so set + # placeholders for the other attributes + trajectory.steps.append( + Transition( + timestep=0, + agent_state=None, + next_agent_state=None, + observation=[], + next_observation=Transition.NO_OBSERVATION, + action=loss, + reward=0, + done=False, + ) + ) + + # Run full optimizer step + for i in range(3): # Retries + opt.aggregate([trajectory]) + assert opt.good_examples == [ + Example(input=x, output=y, score=0) for x, y in zip(x, y, strict=True) + ] + + await opt.update() + assert not opt.examples, "Expected reset of examples after update." + assert len(opt.trace) == i + 2, "Expected new prompt to be recorded." 
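+
+        # With the updated prompt, a correct guess gives zero loss, so we can stop retrying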
+ + with eval_mode(): + if (await forward(xi, yi)).value == 0: # pylint: disable=undefined-loop-variable + return + + raise AssertionError("Failed to complete optimization after retries.") + + +@pytest.mark.skipif( + IN_GITHUB_ACTIONS, + reason="Flaky test because of the stochasticity of LLM completion", +) +@pytest.mark.parametrize( + ("num_transitions_per_traj", "opt_config"), + [ + (1, {"buffer_size": 10, "return_threshold": 5.0}), + # (10, {"buffer_size": 20, "return_threshold": None}), # Skipping - takes 4+ minutes + ], +) +@pytest.mark.usefixtures("seed_zero") +async def test_openai_sft_optimizer( + num_transitions_per_traj: int, opt_config: dict +) -> None: + prompt_op = PromptOp("Who Are you?") + package_msg_op = FxnOp(append_to_sys) + llm_config = {"model": CILLMModelNames.OPENAI.value} + llm_call_op = LLMCallOp() + + @compute_graph() + async def forward(): + """Perform a forward pass through the model and calculate the loss.""" + s = await prompt_op() + msg = await package_msg_op(s) + return await llm_call_op(llm_config, msg) + + opt = OpenAISFTOpt(llm_call_op=llm_call_op, config=OpenAISFTOptConfig(**opt_config)) + + # Fixed set of rewards for the validation set + fixed_rewards = [6, 5, 7, 9, 3, 6, 8, 4, 1, 10] + + # Build validation set + for _i in range(10): # Generate 10 validation examples + res_list = [await forward() for _ in range(num_transitions_per_traj)] + rewards = fixed_rewards[:num_transitions_per_traj] + for res, _ in zip( + res_list, rewards, strict=False + ): # Ignore the reward variable + res.compute_grads(backward_fns={FxnOp: ste}) + + trajectory = Trajectory( + steps=[ + Transition( + timestep=0, + agent_state=None, + next_agent_state=None, + observation=Transition.NO_OBSERVATION, + next_observation=Transition.NO_OBSERVATION, + action=res, + reward=reward, + done=False, + ) + for res, reward in zip(res_list, rewards, strict=False) + ] + ) + + opt.aggregate_trajectory(trajectory, buffer_type="validation") + + # Build training set + for _i in range(20): # Re-run until buffer is full + res_list = [await forward() for _ in range(num_transitions_per_traj)] + rewards = [10 for _ in range(num_transitions_per_traj)] + for res, _ in zip( + res_list, rewards, strict=False + ): # Ignore the reward variable + res.compute_grads(backward_fns={FxnOp: ste}) + + trajectory = Trajectory( + steps=[ + Transition( + timestep=0, + agent_state=None, + next_agent_state=None, + observation=Transition.NO_OBSERVATION, + next_observation=Transition.NO_OBSERVATION, + action=res, + reward=reward, + done=False, + ) + for res, reward in zip(res_list, rewards, strict=False) + ] + ) + + opt.aggregate_trajectory(trajectory) + + await opt.update() + + # Check that training examples were actually stored in the buffer + assert len(opt.train_buffer) >= 2, "Expected examples to be stored in the buffer." + + with eval_mode(): + for _ in range(5): + res = await forward() + if "I'm" in res.value.content or "I am" in res.value.content: + return + raise AssertionError("Failed to perform expert iteration training") + + +@pytest.mark.asyncio +@pytest.mark.skipif( + IN_GITHUB_ACTIONS, + reason="Flaky test because of the stochasticity of LLM completion", +) +@pytest.mark.usefixtures("seed_zero") +async def test_openai_sft_optimizer_return_threshold() -> None: + prompt_op = PromptOp("Who Are you?") + package_msg_op = FxnOp(append_to_sys) + llm_config = {"model": "gpt-4o-mini"} # Check gpt-4o finetuning. 
+ llm_call_op = LLMCallOp() + + @compute_graph() + async def forward(): + """Perform a forward pass through the model and calculate the loss.""" + s = await prompt_op() + msg = await package_msg_op(s) + return await llm_call_op(llm_config, msg) + + # Set up the optimizer with a reward threshold higher than the test rewards + opt_config = {"buffer_size": 10, "return_threshold": 5.0} + opt = OpenAISFTOpt(llm_call_op=llm_call_op, config=OpenAISFTOptConfig(**opt_config)) + + # Test with rewards lower than the threshold + res_list = [await forward()] + rewards = [3] # Lower than the threshold + for res, _ in zip(res_list, rewards, strict=False): + res.compute_grads(backward_fns={FxnOp: ste}) + + trajectory = Trajectory( + steps=[ + Transition( + timestep=0, + agent_state=None, + next_agent_state=None, + observation=Transition.NO_OBSERVATION, + next_observation=Transition.NO_OBSERVATION, + action=res, + reward=reward, + done=False, + ) + for res, reward in zip(res_list, rewards, strict=False) + ] + ) + + opt.aggregate_trajectory(trajectory) + + # Assert that the train buffer remains empty + assert not opt.train_buffer, "Expected train buffer to be empty." + + +@pytest.mark.asyncio +@pytest.mark.skipif( + IN_GITHUB_ACTIONS, + reason="Flaky test because of the stochasticity of LLM completion", +) +async def test_openai_sft_optimizer_with_tool_calls() -> None: + agent = ReActAgent( + llm_model={"model": CILLMModelNames.OPENAI.value, "temperature": 1.0} + ) + opt = OpenAISFTOpt.from_agent(agent) + rollout = RolloutManager(agent) + + results: list[tuple[Trajectory, Any]] = await rollout.sample_trajectories( + environment_factory=lambda: DummyEnv(end_immediately=False), + max_steps=2, + batch_size=12, + ) + + for traj, _ in results: + if traj.failed: + continue + + assert len(traj.steps) == 2 + traj.steps[0].reward = 0.0 + traj.steps[1].reward = 1.0 + traj.steps[1].truncated = False + traj.steps[1].done = True + + for step in traj.steps: + assert step.action is not None, "Expected step.action to be non-None" + step.action.compute_grads() + + opt.aggregate_trajectory(traj) + + await opt.update() + + +@pytest.mark.asyncio +@pytest.mark.skipif( + IN_GITHUB_ACTIONS, + reason=( + "Flaky test because of the stochasticity of LLM completion; small rate limits" + ), +) +async def test_openai_sft_optimizer_with_length_penalty() -> None: + agent = ReActAgent( + llm_model={"model": CILLMModelNames.OPENAI.value, "temperature": 1.0} + ) + opt_config = { + "buffer_size": 10, + "return_threshold": 5.0, # Set return threshold to 5.0 + } + opt = OpenAISFTOpt.from_agent(agent, config=OpenAISFTOptConfig(**opt_config)) + rollout = RolloutManager(agent) + + # Define a penalty function that penalizes the length of the return list + def length_penalty(length: int) -> float: + return 1 / (1 + length) # Simple penalty based on list length + + # Sample trajectories from the environment + results: list[tuple[Trajectory, Any]] = await rollout.sample_trajectories( + environment_factory=lambda: DummyEnv(end_immediately=False), + max_steps=2, + batch_size=12, + ) + + # Modify the first trajectory to create a short trajectory with a length of 1 + short_trajectory = results[0][0] + short_trajectory.steps = short_trajectory.steps[:1] # Keep only the first step + short_trajectory.steps[0].reward = 12.0 # High reward + short_trajectory.steps[0].done = True + assert ( + short_trajectory.steps[0].action is not None + ), "Expected step.action to be non-None" + short_trajectory.steps[0].action.compute_grads() + + # Apply the penalty function 
when aggregating the short trajectory + opt.aggregate_trajectory(short_trajectory, len_penalty_fn=length_penalty) + + # Modify the second trajectory to create a long trajectory with a length of 10 + long_trajectory = results[1][0] + long_trajectory.steps *= 5 # Repeat steps to make 10 + for step in long_trajectory.steps: + step.reward = 0.5 # Low reward for each step + step.truncated = False + step.done = False + assert step.action is not None, "Expected step.action to be non-None" + step.action.compute_grads() + long_trajectory.steps[-1].done = True # Mark the last step as done + + # Apply the penalty function when aggregating the long trajectory + opt.aggregate_trajectory(long_trajectory, len_penalty_fn=length_penalty) + + # Verify that the short trajectory is in the buffer and the long one is not + assert len(opt.train_buffer) == 1, "Expected only one trajectory in the buffer." + + +def mem_opt_failed(exc: BaseException) -> bool: + # Sometimes the memory opt fails to converge because the training examples + # are not informative. Try again + return isinstance(exc, AssertionError) and "should be less than" in str(exc) + + +@pytest.mark.asyncio +@tenacity.retry( + stop=tenacity.stop_after_attempt(3), + retry=tenacity.retry_if_exception(mem_opt_failed), +) +async def test_memory_optimizer() -> None: + x = ["Hello", "Day", "Bar"] + y = [str(len(xi)) for xi in x] + + mem_op = MemoryOp() + # seed with one memory to show example + await mem_op.memory_model.add_memory( + Memory(query="Great", output=str(len("Great")), value=1.0) + ) + package_msg_op = FxnOp( + lambda mems, xi: append_to_sys( + "Previous attempts:\n" + + "\n\n".join(str(m) for m in mems) + + f"\n-----\n\n{xi}", + "Guess a number based on the input word.", + ) + ) + # this is flaky, so use a smarter model + llm_config = {"model": "gpt-4-turbo", "temperature": 0.0, "max_retries": 3} + llm_call_op = LLMCallOp() + strip_op = FxnOp(lambda x: x.content) + loss_op = SquaredErrorLoss() + + async def reward_fn(target: str, result: OpResult) -> float: + # MemoryOp works with rewards, not gradients. So instead of + # backproping through the loss, we compute a non-differentiable + # reward. + loss = (await loss_op(target, result)).value + if loss == 0: + # positive reward if it got it right + return 1.0 + return -loss + + opt = MemoryOpt(memory_op=mem_op, output_op=llm_call_op) + + trajectory = Trajectory() + for xi, yi in zip(x, y, strict=True): + async with compute_graph(): + mems = await mem_op(xi) + msg = await package_msg_op(mems, xi) + c = await llm_call_op(llm_config, msg) + yh = await strip_op(c) + + reward = await reward_fn(yi, yh) + yh.compute_grads() + + # MemoryOpt is only going to look at action and reward, so set placeholders + # for the other values + trajectory.steps.append( + Transition( + timestep=0, + agent_state=None, + next_agent_state=None, + observation=Transition.NO_OBSERVATION, + next_observation=Transition.NO_OBSERVATION, + action=yh, + reward=reward, + done=False, + ) + ) + + opt.aggregate([trajectory]) + await opt.update() + + assert ( + len(mem_op.memory_model.memories) == 4 + ), "Incorrect number of stored memories after optimization step." + assert ( + not opt.example_buffer + ), "MemoryOpt buffer should be empty after applying update" + + x_eval, y_eval = xi, yi # pylint: disable=undefined-loop-variable + async with compute_graph(): + with eval_mode(): + mems = await mem_op(x_eval) + msg = await package_msg_op(mems, x_eval) + print(msg) + assert len(msg.value) > 1, "Message should have multiple memories." 
+ # check that Input appears a few times (from memories) + assert msg.value[-1].content, "unexpected message content" + assert ( + msg.value[-1].content.count("Input") > 2 + ), "Input should appear multiple times in the response." + + c = await llm_call_op(llm_config, msg) + yh = await strip_op(c) + loss = await loss_op(y_eval, yh) + + assert ( + loss.value < 100 + ), f"Loss ({loss.value:.3f}) should be less than 100 after training." diff --git a/tests/test_prompts.py b/tests/test_prompts.py new file mode 100644 index 00000000..c19ac43f --- /dev/null +++ b/tests/test_prompts.py @@ -0,0 +1,21 @@ +import textwrap + +from ldp.llms.prompts import indent_xml + + +def test_indent_xml(): + xml = "fooline1\nline2\nline3" + expected = textwrap.dedent( + """\ + + + foo + + line1 + line2 + line3 + + + """ + ) + assert indent_xml(xml) == expected diff --git a/tests/test_rollouts.py b/tests/test_rollouts.py new file mode 100644 index 00000000..c3ae628f --- /dev/null +++ b/tests/test_rollouts.py @@ -0,0 +1,268 @@ +import random +import tempfile +from copy import deepcopy +from typing import Any, cast + +import pytest +from aviary.env import Environment, Frame +from aviary.message import Message +from aviary.tools import Tool, ToolRequestMessage +from pydantic import BaseModel + +from ldp.agent import Agent, SimpleAgent, SimpleAgentState +from ldp.alg.beam_search import BeamSearchRollout +from ldp.alg.callbacks import Callback +from ldp.alg.rollout import RolloutManager +from ldp.alg.tree_search import TreeSearchRollout +from ldp.data_structures import Trajectory, Transition +from ldp.graph.common_ops import FxnOp +from ldp.graph.op_utils import compute_graph, set_training_mode +from ldp.graph.ops import OpResult + + +class DummyEnv(Environment[None]): + def __init__(self): + self.tools = [Tool.from_function(self.talk)] + + async def reset(self) -> tuple[list[Message], list[Tool]]: + return [Message(content="Hello!")], self.tools + + async def step( + self, action: ToolRequestMessage + ) -> tuple[list[Message], float, bool, bool]: + if action.tool_calls: + responses = cast(list[Message], await self.exec_tool_calls(action)) + else: + responses = [Message(content="Use the 'talk' tool to speak.")] + + return responses, 0.0, False, False + + async def talk(self, message: str) -> str: + """Say something to me. 
+ + Args: + message (str): what you want to say + + Returns: + str: my response + """ + return message + + def export_frame(self) -> Frame: + return Frame() + + +async def count_exclamations(traj: Trajectory) -> float: + last_step = traj.steps[-1] + agent_state = cast(SimpleAgentState, last_step.next_agent_state) + return float( + sum(m.content.count("!") for m in agent_state.messages if m.content is not None) + ) + + +@pytest.mark.parametrize("training", [True, False]) +@pytest.mark.asyncio +async def test_rollout(training: bool) -> None: + agent = SimpleAgent() + callback = DummyCallback() + set_training_mode(training) + rollout_manager = RolloutManager( + agent, + catch_agent_failures=False, + catch_env_failures=False, + callbacks=[callback], + ) + trajs = await rollout_manager.sample_trajectories( + environments=[DummyEnv(), DummyEnv()], max_steps=1 + ) + assert len(trajs) == 2 + + # Let's check we can serialize and deserialize the trajectories + for traj in trajs: + with tempfile.NamedTemporaryFile(suffix=".jsonl") as f: + traj.to_jsonl(filename=f.name) + rehydrated_traj = Trajectory.from_jsonl(f.name) + assert traj.traj_id == rehydrated_traj.traj_id + + assert all(v == 2 for v in callback.fn_invocations.values()) + + +async def adeepcopy(x): + return deepcopy(x) + + +@pytest.mark.asyncio +async def test_beam_search() -> None: + agent = SimpleAgent() + callback = DummyCallback() + beam_search = BeamSearchRollout( + agent, + beam_width=1, # keep these numbers small to speed up test + samples_per_beam=1, + env_clone_fn=adeepcopy, + agent_clone_fn=deepcopy, + scoring_fn=count_exclamations, + catch_agent_failures=False, + catch_env_failures=False, + callbacks=[callback], + ) + + trajs = await beam_search.sample_trajectories( + environments=[DummyEnv(), DummyEnv()], max_steps=1 + ) + assert len(trajs) == 2 + + assert all(v == 2 for v in callback.fn_invocations.values()) + + +class DummyCallback(Callback): + def __init__(self): + self.fn_invocations = { + "before_transition": 0, + "after_agent_get_asv": 0, + "after_env_step": 0, + "after_transition": 0, + } + + async def before_transition( + self, + traj_id: str, + agent: Agent, + env: Environment, + agent_state: Any, + obs: list[Message], + ) -> None: + self.fn_invocations["before_transition"] += 1 + + async def after_agent_get_asv( + self, + traj_id: str, + action: OpResult[ToolRequestMessage], + next_agent_state: Any, + value: float, + ): + self.fn_invocations["after_agent_get_asv"] += 1 + + async def after_env_step( + self, + traj_id: str, + obs: list[Message], + reward: float, + done: bool, + trunc: bool, + ): + self.fn_invocations["after_env_step"] += 1 + + async def after_transition( + self, + traj_id: str, + agent: Agent, + env: Environment, + transition: Transition, + ) -> None: + self.fn_invocations["after_transition"] += 1 + + +class CountingAgentState(BaseModel): + count: float = 0.0 + + +class CountingAgent(Agent[CountingAgentState]): + def __init__(self): + self.op = FxnOp[ToolRequestMessage](lambda: ToolRequestMessage(tool_calls=[])) + + async def init_state(self, tools: list[Tool]) -> CountingAgentState: + return CountingAgentState() + + @compute_graph() + async def get_asv( + self, agent_state: CountingAgentState, obs: list[Message] + ) -> tuple[OpResult[ToolRequestMessage], CountingAgentState, float]: + new_state = CountingAgentState(count=float(cast(str, obs[0].content)) + 1) + action = await self.op() + return action, new_state, 0.0 + + +class CountingEnv(Environment[float]): + def __init__(self, state: float = 0.0): + 
self.state = state + + async def reset(self) -> tuple[list[Message], list[Tool]]: + return [Message(content=str(self.state))], [] + + async def step( + self, action: ToolRequestMessage + ) -> tuple[list[Message], float, bool, bool]: + self.state += 1 + return [Message(content=str(self.state))], 0.0, self.state >= 3, False + + def export_frame(self) -> Frame: + return Frame() + + +@pytest.mark.asyncio +async def test_deterministic_rollout(): + agent = CountingAgent() + env = CountingEnv() + + rollout_manager = RolloutManager(agent) + traj, *_ = await rollout_manager.sample_trajectories(environments=[env]) + + assert len(traj.steps) == 3 + for i_step, step in enumerate(traj.steps): + f_step = float(i_step) + # check that we didn't clobber any agent or env states + assert step.agent_state.count == f_step + assert step.next_agent_state.count == f_step + 1 + assert step.observation[0].content == str(f_step) + assert step.next_observation[0].content == str(f_step + 1) + + +class NoisyCountingEnv(CountingEnv): + async def step( + self, action: ToolRequestMessage + ) -> tuple[list[Message], float, bool, bool]: + self.state += 1 + random.uniform(-0.01, 0.01) + return [Message(content=str(self.state))], 0.0, self.state >= 3, False + + +@pytest.mark.asyncio +async def test_tree_search(): + agent = CountingAgent() + # Use a slightly stochastic env so we can distinguish branches + env = NoisyCountingEnv() + + callback = DummyCallback() + rollout_manager = TreeSearchRollout( + agent, + branching_factor=2, + env_clone_fn=deepcopy, + concurrency_limit=1, + callbacks=[callback], + ) + trajs = await rollout_manager.sample_tree(env, max_depth=3) + assert len(trajs) == 8 + + observations = {} # type: ignore[var-annotated] + for traj in trajs: + branch_path = tuple(cast(str, traj.traj_id).split(":")[1:]) + + prev_step: Transition | None = None + for i_step, step in enumerate(traj.steps): + if prev_step is not None: + # Check that the child node started at the state emitted at the parent node + assert prev_step.next_agent_state == step.agent_state + + # Steps that started at the same node in the tree should have the same observation + node_id = branch_path[: i_step + 1] + if node_id in observations: + assert observations[node_id] == step.observation[0].content + else: + observations[node_id] = step.observation[0].content + + prev_step = step + + # We expect sum_{i=1}^3 2^i = 2^4 - 2 = 14 transitions: + # - branching factor = 2, depth = 3 + # - root node isn't sampled, so no i=0 term in sum + assert all(v == 14 for v in callback.fn_invocations.values()) diff --git a/tests/test_runners.py b/tests/test_runners.py new file mode 100644 index 00000000..d557666d --- /dev/null +++ b/tests/test_runners.py @@ -0,0 +1,141 @@ +from collections.abc import Sequence +from unittest.mock import patch + +import pytest +from aviary.env import DummyEnv, TaskDataset + +from ldp.agent import MemoryAgent, SimpleAgent +from ldp.alg.callbacks import Callback, MeanMetricsCallback +from ldp.alg.datasets import ( # noqa: F401 # Force TASK_DATASET_REGISTRY update + DummyTaskDataset, +) +from ldp.alg.optimizer import default_optimizer_factory +from ldp.alg.runners import ( + Evaluator, + EvaluatorConfig, + OfflineTrainer, + OfflineTrainerConfig, + OnlineTrainer, + OnlineTrainerConfig, +) +from ldp.data_structures import Trajectory + + +@pytest.mark.asyncio +async def test_online_trainer(): + agent = MemoryAgent() + opt = default_optimizer_factory(agent) + dataset = TaskDataset.from_name("dummy") + callback = DummyCallback() + + train_conf = 
OnlineTrainerConfig( + batch_size=1, + num_train_iterations=1, + max_rollout_steps=1, + num_eval_iterations=1, + eval_every=1, + ) + trainer = OnlineTrainer( + config=train_conf, + agent=agent, + optimizer=opt, + train_dataset=dataset, + eval_dataset=dataset, + callbacks=[callback], + ) + await trainer.train() + + for k, v in callback.fn_invocations.items(): + # eval is run 3 times: before training, during training, after training + assert v == (3 if "eval" in k else 1) + + +@pytest.mark.asyncio +async def test_evaluator() -> None: + agent = SimpleAgent() + dataset = TaskDataset.from_name("dummy") + metrics_callback = MeanMetricsCallback(eval_dataset=dataset) + count_callback = DummyCallback() + + eval_conf = EvaluatorConfig(num_eval_iterations=1) + evaluator = Evaluator( + config=eval_conf, + agent=agent, + dataset=dataset, + callbacks=[metrics_callback, count_callback], + ) + with patch.object(DummyEnv, "close") as mock_close: + await evaluator.evaluate() + + mock_close.assert_awaited_once(), "Env should be closed" + assert isinstance(metrics_callback.eval_means["reward"], float) + + for k, v in count_callback.fn_invocations.items(): + assert v == (1 if "eval" in k else 0) + + +@pytest.mark.asyncio +async def test_offline_trainer(): + # This is kind of a system test of getting trajectories from the evaluator + # and then training on them "offline" + agent = MemoryAgent() + opt = default_optimizer_factory(agent) + dataset = TaskDataset.from_name("dummy") + traj_callback = StoreTrajectoriesCallback() + + evaluator = Evaluator( + config=EvaluatorConfig(num_eval_iterations=1), + agent=agent, + dataset=dataset, + callbacks=[traj_callback], + ) + await evaluator.evaluate() + assert len(traj_callback.trajectories) == 1 + + count_callback = DummyCallback() + train_conf = OfflineTrainerConfig(batch_size=1) + trainer = OfflineTrainer( + config=train_conf, + agent=agent, + optimizer=opt, + train_trajectories=traj_callback.trajectories, + callbacks=[count_callback], + ) + await trainer.train() + + assert count_callback.fn_invocations == { + "after_train_step": 1, + "after_eval_step": 0, + "after_eval_loop": 0, + "after_update": 1, + } + + +class StoreTrajectoriesCallback(Callback): + def __init__(self): + self.trajectories = [] + + async def after_eval_step(self, trajectories: Sequence[Trajectory]) -> None: + self.trajectories.extend(trajectories) + + +class DummyCallback(Callback): + def __init__(self): + self.fn_invocations = { + "after_train_step": 0, + "after_eval_step": 0, + "after_eval_loop": 0, + "after_update": 0, + } + + async def after_train_step(self, trajectories: Sequence[Trajectory]) -> None: + self.fn_invocations["after_train_step"] += 1 + + async def after_eval_step(self, trajectories: Sequence[Trajectory]) -> None: + self.fn_invocations["after_eval_step"] += 1 + + async def after_eval_loop(self) -> None: + self.fn_invocations["after_eval_loop"] += 1 + + async def after_update(self) -> None: + self.fn_invocations["after_update"] += 1 diff --git a/tests/test_torch_ops.py b/tests/test_torch_ops.py new file mode 100644 index 00000000..02ee22ab --- /dev/null +++ b/tests/test_torch_ops.py @@ -0,0 +1,326 @@ +import asyncio +import random +import time +from contextlib import nullcontext +from uuid import UUID + +import numpy as np +import pytest +import torch +from torch import nn + +from ldp.graph.async_torch import AsyncTorchModule, async_protect_torch_call +from ldp.graph.common_ops import ConfigOp, FxnOp, PromptOp +from ldp.graph.gradient_estimators import straight_through_estimator as 
ste +from ldp.graph.op_utils import compute_graph, set_training_mode +from ldp.graph.torch_ops import TorchOp + + +@pytest.fixture(name="run_id") +def fixture_run_id() -> UUID: + return UUID("12345678-1234-5678-1234-567812345678") + + +class AddModule(nn.Module): + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + return x + y + + +class MulModule(nn.Module): + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + return x * y + + +class SinModule(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.sin(x) + + +TEST_CASES = [ + pytest.param( + AddModule(), + ( + torch.tensor(2.0, requires_grad=True), + torch.tensor(3.0, requires_grad=True), + ), + 5.0, + {"x": 1.0, "y": 1.0}, + id="addition", + ), + pytest.param( + MulModule(), + ( + torch.tensor(2.0, requires_grad=True), + torch.tensor(3.0, requires_grad=True), + ), + 6.0, + {"x": 3.0, "y": 2.0}, + id="multiplication", + ), + pytest.param( + SinModule(), + (torch.tensor(np.pi / 4, requires_grad=True),), + np.sin(np.pi / 4), + {"x": np.cos(np.pi / 4)}, + id="non_linear", + ), +] + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + ("module", "inputs", "expected_output", "expected_grads"), TEST_CASES +) +@pytest.mark.parametrize("training_mode", [True, False]) +async def test_torch_op( + module, inputs, expected_output, expected_grads, training_mode: bool +): + set_training_mode(training_mode) + op = TorchOp(module) + async with compute_graph(): + result = await op(*inputs) + assert result.value.requires_grad == training_mode + + if isinstance(expected_output, torch.Tensor): + assert torch.allclose(result.value, expected_output) + else: + assert result.value.detach() == pytest.approx(expected_output) + + raises_context = ( + nullcontext() + if training_mode + else pytest.raises(KeyError, match="key='tensor_input' not found") + ) + + with raises_context: + grad_output = torch.ones_like(result.value) + arg_grads, kwarg_grads = TorchOp.backward( + op.ctx, + [], + {}, # NOTE: compute_grads() would fill this, but TorchOp.backwards() should ignore it + grad_output=grad_output, + call_id=result.call_id, + ) + + assert set(kwarg_grads.keys()) == set(expected_grads.keys()) + + for param, expected in expected_grads.items(): + computed = kwarg_grads[param] + if isinstance(expected, torch.Tensor): + assert torch.allclose(torch.tensor(computed), expected) + else: + assert computed == pytest.approx(expected) + + +@pytest.mark.asyncio +async def test_with_kwargs(): + class ScaledAddModule(nn.Module): + def forward(self, x, y, scale=1.0): + return (x + y) * scale + + op = TorchOp(ScaledAddModule()) + x = torch.tensor(2.0, requires_grad=True) + y = torch.tensor(3.0, requires_grad=True) + scale = torch.tensor(2.0, requires_grad=True) + + async with compute_graph(): + result = await op(x, y, scale=scale) + assert isinstance(result.value.detach(), torch.Tensor) + assert result.value.detach() == pytest.approx(10.0) + + grad_output = torch.tensor(1.0) + arg_grads, kwarg_grads = TorchOp.backward( + op.ctx, + [], + {"x": x, "y": y, "scale": scale}, + grad_output, + result.call_id, + ) + + assert not arg_grads + assert len(kwarg_grads) == 3 + assert kwarg_grads["x"] == pytest.approx(2.0) # d(result)/dx = scale = 2.0 + assert kwarg_grads["y"] == pytest.approx(2.0) # d(result)/dy = scale = 2.0 + assert kwarg_grads["scale"] == pytest.approx( + 5.0 + ) # d(result)/d(scale) = x + y = 5.0 + + +@pytest.mark.asyncio +async def test_torch_op_composition() -> None: + # Define our ops + config_op = 
ConfigOp(config={"scale": 2.0}) + fxn_op = FxnOp(lambda x: x["scale"]) + + class ScaleSum(nn.Module): + def forward(self, x, y): + return y * torch.sum(x) + + torch_op = TorchOp(ScaleSum()) + prompt_op = PromptOp("The result is: {result}") + + # Forward pass + async with compute_graph(): + config = await config_op() + fxn_result = await fxn_op(config) + x = [1.0, 2.0, 3.0] + torch_result = await torch_op(x, fxn_result) + prompt_result = await prompt_op(result=torch_result) + + # Check forward pass results + assert isinstance(torch_result.value, torch.Tensor) + assert torch_result.value.detach() == pytest.approx( + 12.0 + ) # y * sum(x) = 2.0 * 6.0 = 12.0 + assert fxn_result.value == pytest.approx(2.0) + assert prompt_result.value == "The result is: 12.0" + + # Backward pass + loss_grad = -1.0 # Arbitrary d(loss)/d(result) + prompt_result.compute_grads( + loss_grad, + backward_fns={ + PromptOp: ste, + FxnOp: ste, + }, + ) + fxn_grad = fxn_op.get_input_grads(fxn_result.call_id) + torch_grad_args, torch_grad_kwargs = torch_op.get_input_grads(torch_result.call_id) + + # Check backward pass results + + # d(loss)/d(scale) = d(loss)/d(result) * d(result)/d(scale) = -1.0 * 6.0 = -6.0 + assert fxn_grad == ([], {"x": {"scale": -6.0}}) + + assert not torch_grad_args + # d(result)/dx = y = 2.0 + assert torch.allclose(torch_grad_kwargs["x"], torch.tensor([-2.0, -2.0, -2.0])) # type: ignore[arg-type] + # d(result)/dy = sum(x) = 6.0 + assert torch.allclose(torch_grad_kwargs["y"], torch.tensor(-6.0)) # type: ignore[arg-type] + + +@pytest.mark.asyncio +async def test_torch_concurrency(): + assert torch.is_grad_enabled() + + def check_no_grad(grad_expected: bool): + # This takes the place of a pytorch module - we want to + # see if torch.is_grad_enabled() is set correctly + assert torch.is_grad_enabled() == grad_expected + + async def change_grads_a_lot(): + for _ in range(10): + no_grad = random.choice([True, False]) + await asyncio.sleep(0) + await async_protect_torch_call(check_no_grad, no_grad=no_grad)( # type: ignore[arg-type] + not no_grad + ) + assert torch.is_grad_enabled() # Make sure grad state is reset properly + + # The below is an example of doing unsafe concurrent operations in torch. 
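+        # (Awaiting inside torch.no_grad() yields control while grad mode is still
+        # altered, so a concurrently running coroutine can observe the wrong state.)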
+        # if no_grad:
+        #     with torch.no_grad():
+        #         asyncio.sleep(0.0)
+        #         assert not torch.is_grad_enabled()
+        # else:
+        #     asyncio.sleep(0.0)
+        #     check_no_grad(not no_grad)
+
+    await asyncio.gather(*[change_grads_a_lot() for _ in range(10)])
+
+
+@pytest.mark.asyncio
+async def test_torch_autocast():
+    model = nn.Linear(4, 4)
+
+    def call_model():
+        result = model(torch.rand(4))
+        assert result.dtype == torch.bfloat16
+
+    await async_protect_torch_call(
+        call_model,  # type: ignore[arg-type]
+        autocast_dtype=torch.bfloat16,
+        autocast_device_type=torch.device("cpu").type,
+    )()
+
+
+class TestAsyncTorchModule:
+    @pytest.mark.asyncio
+    async def test_batching(self):
+        model = torch.nn.Linear(2, 2)
+        batch_size = 4
+        max_wait = 1.0  # long timeout so we can measure it
+        async_module = AsyncTorchModule(model, batch_size, max_wait)
+
+        # First test that we can do a single call
+        start = time.time()
+        result = await async_module(input=torch.rand(2))
+        assert max_wait < time.time() - start < max_wait * 2
+        assert result.shape == (2,)
+
+        # Now check that we can do a batched call
+        start = time.time()
+        results = await asyncio.gather(*[
+            async_module(input=torch.rand(2)) for _ in range(batch_size)
+        ])
+        # should not have waited for timeout since we hit batch_size
+        assert time.time() - start < max_wait
+        assert len(results) == batch_size
+        assert all(r.shape == (2,) for r in results)
+
+        # Finally, check an uneven number of calls
+        results = await asyncio.gather(*[
+            async_module(input=torch.rand(2)) for _ in range(2 * batch_size + 1)
+        ])
+        assert len(results) == 2 * batch_size + 1
+        assert all(r.shape == (2,) for r in results)
+
+    @pytest.mark.parametrize("batch_size", [4, 10])
+    @pytest.mark.parametrize("max_wait", [0.01, 0.1, 1.0])
+    @pytest.mark.asyncio
+    async def test_ordering(self, batch_size: int, max_wait: float):
+        # Make sure that we are actually getting the right results back, and that results
+        # aren't jumbled
+
+        model = torch.nn.Linear(2, 1, bias=False)
+        model.weight.data.fill_(1.0)
+        async_module = AsyncTorchModule(model, batch_size, max_wait)
+
+        # Run 2 loops to make sure subsequent calls don't interfere
+        for _ in range(2):
+            inputs = [torch.rand(2) for _ in range(32)]
+            outputs = await asyncio.gather(*[async_module(input=x) for x in inputs])
+
+            for inp, out in zip(inputs, outputs, strict=True):
+                assert torch.allclose(out, inp.sum())
+
+        # Make sure we didn't leave any dangling work in the buffer
+        assert not async_module._work_buffer
+
+    # For tiny models, we expect a slowdown, but less than 3x.
+    # As the model gets bigger, we get a slight speedup.
+    # The real benefit is when the model fills up a GPU, but we don't want that in tests.
+ @pytest.mark.skip( + reason="The speedups are very hardware-dependent, so do not automatically run.", + ) + @pytest.mark.parametrize(("dim", "expected_speedup"), [(2, 3), (4096, 0.8)]) + @pytest.mark.asyncio + async def test_performance(self, dim: int, expected_speedup: float): + batch_size = 10 + model = torch.nn.Linear(dim, dim) + async_module = AsyncTorchModule(model, batch_size, 0.001) + sync_batch = torch.rand(batch_size, dim) + start = time.time() + _ = model(sync_batch) + sync_time = time.time() - start + + async_batch = list(sync_batch) + start = time.time() + _ = await asyncio.gather(*[async_module(input=x) for x in async_batch]) + async_time = time.time() - start + print( + f"Dimension: {dim}; torch.nn.Module: {sync_time:.6f}s; AsyncTorchModule:" + f" {async_time:.6f}s" + ) + + assert async_time < sync_time * expected_speedup From b0cc968b683b0535f87d7361bd889d2976e539f2 Mon Sep 17 00:00:00 2001 From: James Braza Date: Tue, 3 Sep 2024 16:02:53 -0700 Subject: [PATCH 4/5] Created CI pipelines for lint/test, publish, and codeflash --- .github/workflows/codeflash.yml | 42 +++++++++++++++++++++++++++++++++ .github/workflows/publish.yml | 20 ++++++++++++++++ .github/workflows/tests.yml | 39 ++++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) create mode 100644 .github/workflows/codeflash.yml create mode 100644 .github/workflows/publish.yml create mode 100644 .github/workflows/tests.yml diff --git a/.github/workflows/codeflash.yml b/.github/workflows/codeflash.yml new file mode 100644 index 00000000..8d03727e --- /dev/null +++ b/.github/workflows/codeflash.yml @@ -0,0 +1,42 @@ +name: CodeFlash + +on: + pull_request: + paths: + - "ldp/**" + workflow_dispatch: + +concurrency: # Cancel prior if new push, SEE: https://stackoverflow.com/a/72408109 + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + optimize: # SEE: https://docs.codeflash.ai/getting-started/codeflash-github-actions + runs-on: ubuntu-latest + env: + CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }} + CODEFLASH_PR_NUMBER: ${{ github.event.number }} + steps: + - name: Check if PR is from CodeFlash bot + id: bot_check + working-directory: . + run: | + echo "Checking if this PR is created by CodeFlash bot..." + if [ "${{ github.event.pull_request.user.login }}" == "codeflash-ai[bot]" ]; then + echo "PR created by Codeflash bot. Skipping optimization." + echo "skip_remaining_steps=yes" >> $GITHUB_OUTPUT + else + echo "skip_remaining_steps=no" >> $GITHUB_OUTPUT + echo "It's not. Proceeding with the optimization." 
+ fi + - if: steps.bot_check.outputs.skip_remaining_steps == 'no' + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up uv + if: steps.bot_check.outputs.skip_remaining_steps == 'no' + run: curl -LsSf https://astral.sh/uv/install.sh | sh + - if: steps.bot_check.outputs.skip_remaining_steps == 'no' + run: uv sync + - if: steps.bot_check.outputs.skip_remaining_steps == 'no' + run: uv run codeflash diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 00000000..179fb482 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,20 @@ +name: Publish + +on: + release: + types: [created] + workflow_dispatch: + +jobs: + publish: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up uv + run: curl -LsSf https://astral.sh/uv/install.sh | sh + - run: uv sync + - name: Build a binary wheel and a source tarball + run: uv run python -m build --sdist --wheel --outdir dist/ . + - uses: pypa/gh-action-pypi-publish@release/v1 + with: + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..0d5a6dba --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,39 @@ +name: Lint and Test + +on: + push: + branches: [main] + pull_request: + +jobs: + pre-commit: + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' # pre-commit-ci/lite-action only runs here + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.12 + - uses: pre-commit/action@v3.0.1 + - uses: pre-commit-ci/lite-action@v1.0.2 + if: always() + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up uv + run: curl -LsSf https://astral.sh/uv/install.sh | sh + - run: uv sync + - run: uv run refurb ldp tests + - run: uv run pylint ldp tests + test: + runs-on: large-runner + steps: + - uses: actions/checkout@v4 + - name: Set up uv + run: curl -LsSf https://astral.sh/uv/install.sh | sh + - run: uv sync + - run: uv run pytest -n 16 --dist=loadfile # auto only launches 8 workers in CI, despite runners have 16 cores + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} From 7894aef8f22077d566dc0705d1d75ba5929cb38d Mon Sep 17 00:00:00 2001 From: James Braza Date: Tue, 3 Sep 2024 11:45:31 -0700 Subject: [PATCH 5/5] Populated basic README --- README.md | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8e6e33f1..fa384d7f 100644 --- a/README.md +++ b/README.md @@ -1 +1,98 @@ -# ldp \ No newline at end of file +# ldp + +Agent framework for constructing language model agents and training on constructive tasks. + +This repo models agent-environment interactions using a +[Partially Observable Markov Decision Process][pomdp] (POMDP). +Inspired by POMDP, this repo's name `ldp` stands for Language Decision Processes. + +[pomdp]: https://en.wikipedia.org/wiki/Partially_observable_Markov_decision_process + +## Installation + +To install `ldp`: + +```bash +pip install -e . 
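+# Or, to reproduce this repository's locked environment (assumes the uv CLI is installed):
+uv sync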
+```
+
+If you plan to export Graphviz visualizations,
+make sure you also install the `graphviz` library into your OS via:
+
+- Linux: `apt install graphviz`
+- macOS: `brew install graphviz`
+
+## Agent/Policy
+
+An agent should have two functions:
+
+```py
+agent_state = await agent.init_state(tools=tools)
+new_action, new_agent_state, value = await agent.get_asv(
+    agent_state, obs
+)
+```
+
+The `get_asv(agent_state, obs)` function chooses an action (`a`)
+based on the observation messages,
+and returns the next agent state (`s`) along with a value estimate (`v`).
+The first argument, `agent_state`, is state specific to the agent
+that can be used for training from episodes.
+You can make it `None` if you aren't using it.
+It could contain things like agent memory.
+
+The `obs` are not the complete list of observations, but rather the last list from `env.step`.
+The agent should keep track of observations via its state if it would like to keep them.
+
+The value can be `0`;
+it is the agent's estimate of the future rewards given its state and observations.
+This is used for training.
+
+### Generic Support
+
+`Agent` (as well as the classes in `ldp.graph.ops`)
+is a [generic](https://en.wikipedia.org/wiki/Generic_programming),
+which means:
+
+- `Agent` is designed to support arbitrary types
+- Subclasses can exactly specify state types, making the code more readable
+
+If you are new to Python generics (`typing.Generic`),
+please read about them in [Python typing](https://docs.python.org/3/library/typing.html#generics).
+
+Below is how to specify an agent with a custom state type.
+
+```py
+from dataclasses import dataclass, field
+from datetime import datetime
+
+from ldp.agent import Agent
+
+
+@dataclass
+class MyComplexState:
+    vector: list[float]
+    timestamp: datetime = field(default_factory=datetime.now)
+
+
+class MyAgent(Agent[MyComplexState]):
+    """Some agent that is now type-checked to match the custom state."""
+```
+
+## Complete Example
+
+```py
+from ldp.agent import SimpleAgent
+from aviary.env import DummyEnv
+
+env = DummyEnv()
+agent = SimpleAgent()
+
+obs, tools = await env.reset()
+agent_state = await agent.init_state(tools=tools)
+
+done = False
+while not done:
+    action, agent_state, _ = await agent.get_asv(agent_state, obs)
+    obs, reward, done, truncated = await env.step(action.value)
+```
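+
+Note this complete example uses top-level `await`, which works in a Jupyter notebook.
+In a plain script, wrap the rollout in a coroutine and hand it to `asyncio`.
+A minimal sketch, where `main` is a hypothetical wrapper around the loop above:
+
+```py
+import asyncio
+
+
+async def main() -> None:
+    ...  # the reset/rollout loop from the example above
+
+
+asyncio.run(main())
+```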