diff --git a/pyproject.toml b/pyproject.toml index 48c79f8..e8d5fa1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,8 +29,7 @@ dependencies = [ "httpx>=0.28.0", "httpcore>=1.0.9", # Required for Python 3.14 compatibility - # Token counting (OpenAI models) - "tiktoken>=0.12.0", + # Core functionality "pydantic>=2.12.0", diff --git a/src/gac/ai_utils.py b/src/gac/ai_utils.py index d798c76..1873cfc 100644 --- a/src/gac/ai_utils.py +++ b/src/gac/ai_utils.py @@ -8,14 +8,11 @@ import os import time from collections.abc import Callable -from functools import lru_cache from typing import Any, cast -import tiktoken from rich.console import Console from rich.status import Status -from gac.constants import EnvDefaults, Utility from gac.errors import AIError from gac.oauth import QwenOAuthProvider, refresh_token_if_expired from gac.oauth.token_store import TokenStore @@ -25,29 +22,16 @@ console = Console() -@lru_cache(maxsize=1) -def _should_skip_tiktoken_counting() -> bool: - """Return True when token counting should avoid tiktoken calls entirely.""" - value = os.getenv("GAC_NO_TIKTOKEN", str(EnvDefaults.NO_TIKTOKEN)) - return value.lower() in ("true", "1", "yes", "on") - - def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int: - """Count tokens in content using the model's tokenizer.""" + """Count tokens in content using character-based estimation (1 token per 3.4 characters).""" text = extract_text_content(content) if not text: return 0 - if _should_skip_tiktoken_counting(): - return len(text) // 4 - - try: - encoding = get_encoding(model) - return len(encoding.encode(text)) - except (KeyError, UnicodeError, ValueError) as e: - logger.error(f"Error counting tokens: {e}") - # Fallback to rough estimation (4 chars per token on average) - return len(text) // 4 + # Use simple character-based estimation: 1 token per 3.4 characters (rounded) + result = round(len(text) / 3.4) + # Ensure at least 1 token for non-empty text + return result if result > 0 else 1 def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str: @@ -61,24 +45,6 @@ def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) - return "" -@lru_cache(maxsize=1) -def get_encoding(model: str) -> tiktoken.Encoding: - """Get the appropriate encoding for a given model.""" - provider, model_name = model.split(":", 1) if ":" in model else (None, model) - - if provider != "openai": - return tiktoken.get_encoding(Utility.DEFAULT_ENCODING) - - try: - return tiktoken.encoding_for_model(model_name) - except KeyError: - # Fall back to default encoding if model not found - return tiktoken.get_encoding(Utility.DEFAULT_ENCODING) - except (OSError, ConnectionError): - # If there are any network/SSL issues, fall back to default encoding - return tiktoken.get_encoding(Utility.DEFAULT_ENCODING) - - def generate_with_retries( provider_funcs: dict[str, Callable[..., str]], model: str, diff --git a/src/gac/config.py b/src/gac/config.py index 5982e6e..65ce6d2 100644 --- a/src/gac/config.py +++ b/src/gac/config.py @@ -27,7 +27,6 @@ class GACConfig(TypedDict, total=False): warning_limit_tokens: int always_include_scope: bool skip_secret_scan: bool - no_tiktoken: bool no_verify_ssl: bool verbose: bool system_prompt_path: str | None @@ -110,7 +109,6 @@ def load_config() -> GACConfig: in ("true", "1", "yes", "on"), "skip_secret_scan": os.getenv("GAC_SKIP_SECRET_SCAN", str(EnvDefaults.SKIP_SECRET_SCAN)).lower() in ("true", "1", "yes", "on"), - "no_tiktoken": 
os.getenv("GAC_NO_TIKTOKEN", str(EnvDefaults.NO_TIKTOKEN)).lower() in ("true", "1", "yes", "on"), "no_verify_ssl": os.getenv("GAC_NO_VERIFY_SSL", str(EnvDefaults.NO_VERIFY_SSL)).lower() in ("true", "1", "yes", "on"), "verbose": os.getenv("GAC_VERBOSE", str(EnvDefaults.VERBOSE)).lower() in ("true", "1", "yes", "on"), diff --git a/src/gac/constants/defaults.py b/src/gac/constants/defaults.py index 4d6b212..546ef56 100644 --- a/src/gac/constants/defaults.py +++ b/src/gac/constants/defaults.py @@ -13,7 +13,6 @@ class EnvDefaults: ALWAYS_INCLUDE_SCOPE: bool = False SKIP_SECRET_SCAN: bool = False VERBOSE: bool = False - NO_TIKTOKEN: bool = False NO_VERIFY_SSL: bool = False # Skip SSL certificate verification (for corporate proxies) HOOK_TIMEOUT: int = 120 # Timeout for pre-commit and lefthook hooks in seconds @@ -34,7 +33,6 @@ class Logging: class Utility: """General utility constants.""" - DEFAULT_ENCODING: str = "cl100k_base" # llm encoding DEFAULT_DIFF_TOKEN_LIMIT: int = 15000 # Maximum tokens for diff processing MAX_WORKERS: int = os.cpu_count() or 4 # Maximum number of parallel workers MAX_DISPLAYED_SECRET_LENGTH: int = 50 # Maximum length for displaying secrets diff --git a/tests/test_ai.py b/tests/test_ai.py index a5f41e3..8384e1b 100644 --- a/tests/test_ai.py +++ b/tests/test_ai.py @@ -3,13 +3,11 @@ from unittest.mock import MagicMock, patch import pytest -import tiktoken from gac.ai import generate_commit_message, generate_grouped_commits from gac.ai_utils import ( count_tokens, extract_text_content, - get_encoding, ) from gac.errors import AIError from gac.providers import PROVIDER_REGISTRY, SUPPORTED_PROVIDERS @@ -34,46 +32,40 @@ def test_extract_text_content(self): # Test empty input assert extract_text_content({}) == "" - def test_get_encoding_known_model(self): - """Test getting encoding for known models with optimized mocking.""" - # Create a mock encoding to avoid slow tiktoken loading - mock_encoding = MagicMock(spec=tiktoken.Encoding) - mock_encoding.name = "cl100k_base" - mock_encoding.encode.return_value = [9906, 1917] # Tokens for "Hello world" - mock_encoding.decode.return_value = "Hello world" - - with patch("tiktoken.encoding_for_model", return_value=mock_encoding): - # Test with a well-known OpenAI model that should map to cl100k_base - encoding = get_encoding("openai:gpt-4") - assert isinstance(encoding, tiktoken.Encoding) - assert encoding.name == "cl100k_base" - - # Verify encoding behavior - tokens = encoding.encode("Hello world") - assert len(tokens) > 0 - assert isinstance(tokens[0], int) - - # Decode should round-trip correctly - decoded = encoding.decode(tokens) - assert decoded == "Hello world" + def test_character_based_counting_simple(self): + """Test simple character-based counting without external dependencies.""" + # Test basic functionality + text = "Hello world" + result = count_tokens(text, "any:model") + expected = round(len(text) / 3.4) + assert result == expected + + # Test with empty string + assert count_tokens("", "any:model") == 0 + + # Test with single character + assert count_tokens("a", "any:model") == 1 def test_count_tokens(self): """Test token counting functionality.""" # Test with string content text = "Hello, world!" 
token_count = count_tokens(text, "openai:gpt-4") - assert token_count > 0 + expected = round(len(text) / 3.4) + assert token_count == expected assert isinstance(token_count, int) - @patch("gac.ai_utils.count_tokens") - def test_count_tokens_anthropic_mock(self, mock_count_tokens): - """Test that anthropic models are handled correctly.""" - # This tests the code path, not the actual implementation - mock_count_tokens.return_value = 5 + def test_count_tokens_all_models_same(self): + """Test that all models work the same with character-based counting.""" + text = "Test message" + expected = round(len(text) / 3.4) - # Test that anthropic model strings are recognized - model = "anthropic:claude-3-haiku" - assert model.startswith("anthropic") + # Test that all providers give same result + models = ["anthropic:claude-3-haiku", "openai:gpt-4", "groq:llama3", "gemini:gemini-pro"] + + for model in models: + result = count_tokens(text, model) + assert result == expected, f"Model {model} should give {expected}, got {result}" def test_count_tokens_empty_content(self): """Test token counting with empty content.""" @@ -84,55 +76,54 @@ def test_count_tokens_empty_content(self): # Test with list of messages messages = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there!"}] token_count = count_tokens(messages, "openai:gpt-4") - assert token_count > 0 + expected = round(len("Hello\nHi there!") / 3.4) + assert token_count == expected # Test with dict content message = {"role": "user", "content": "Test message"} token_count = count_tokens(message, "openai:gpt-4") - assert token_count > 0 - - def test_get_encoding_unknown_model(self): - """Test getting encoding for unknown models falls back to default.""" - # Create a mock default encoding to avoid slow tiktoken loading - mock_encoding = MagicMock(spec=tiktoken.Encoding) - mock_encoding.name = "cl100k_base" - - with patch("tiktoken.get_encoding", return_value=mock_encoding): - # Clear the cache first to ensure fresh test - get_encoding.cache_clear() - - # Test with unknown model should fall back to default encoding - encoding = get_encoding("unknown:model-xyz") - assert isinstance(encoding, tiktoken.Encoding) - # Should use the default cl100k_base encoding - assert encoding.name == "cl100k_base" - - def test_count_tokens_error_handling(self): - """Test error handling in count_tokens function.""" - # Test with a model that will cause encoding error - with patch("gac.ai_utils.get_encoding") as mock_encoding: - mock_encoding.side_effect = ValueError("Encoding error") - - # Should fall back to character-based estimation (len/4) - token_count = count_tokens("Hello world", "test:model") - assert token_count == len("Hello world") // 4 + expected = round(len("Test message") / 3.4) + assert token_count == expected + + def test_character_based_all_providers_same(self): + """Test that character-based counting works the same for all providers.""" + text = "Sample test message" + expected = round(len(text) / 3.4) + + providers = ["openai:gpt-4", "anthropic:claude-3", "groq:llama3-70b", "gemini:gemini-pro"] + + for provider in providers: + result = count_tokens(text, provider) + assert result == expected, f"Provider {provider} should give {expected}, got {result}" + + def test_character_based_no_errors(self): + """Test that character-based counting never raises errors.""" + # Various inputs that should always work + test_cases = [ + "", + "Simple text", + "Unicode: café résumé", + "Emoji: 🎉🚀", + "New\nline\tand tabs", + ] + + for text in 
test_cases: + result = count_tokens(text, "any:model") + assert isinstance(result, int) + assert result >= 0 def test_count_tokens_with_various_content_types(self): """Test count_tokens with different content formats.""" - # Mock encoding to avoid slow tiktoken loading - mock_encoding = MagicMock(spec=tiktoken.Encoding) - mock_encoding.encode.return_value = [1, 2, 3, 4, 5] # Mock tokens - - with patch("gac.ai_utils.get_encoding", return_value=mock_encoding): - # Test with list containing invalid items - messages = [ - {"role": "user", "content": "Valid message"}, - {"role": "assistant"}, # Missing content - "invalid", # Not a dict - {"content": "No role"}, # Has content - ] - token_count = count_tokens(messages, "openai:gpt-4") - assert token_count == 5 # Should return mock token count + # Test with list containing various items + messages = [ + {"role": "user", "content": "Valid message"}, + {"role": "assistant"}, # Missing content + "invalid", # Not a dict + {"content": "No role"}, # Has content + ] + token_count = count_tokens(messages, "openai:gpt-4") + expected = round(len("Valid message\nNo role") / 3.4) + assert token_count == expected class TestGenerateCommitMessage: diff --git a/tests/test_ai_utils.py b/tests/test_ai_utils.py index c7e8c4c..1afb34a 100644 --- a/tests/test_ai_utils.py +++ b/tests/test_ai_utils.py @@ -20,129 +20,116 @@ class TestCountTokens: """Test count_tokens function.""" def test_count_tokens(self): - """Test token counting functionality.""" - # Mock encoding to avoid slow tiktoken loading - with patch("gac.ai_utils.get_encoding") as mock_get_encoding: - mock_encoding = MagicMock() - mock_encoding.encode.return_value = [1, 2, 3, 4] # Mock tokens - mock_get_encoding.return_value = mock_encoding - - # Test with string content - text = "Hello, world!" - token_count = ai_utils.count_tokens(text, "openai:gpt-4") - assert token_count == 4 # Should return mock token count - assert isinstance(token_count, int) - - def test_count_tokens_empty_content(self): - """Test token counting with empty content.""" - assert ai_utils.count_tokens("", "openai:gpt-4") == 0 - assert ai_utils.count_tokens([], "openai:gpt-4") == 0 - assert ai_utils.count_tokens({}, "openai:gpt-4") == 0 - - @patch("gac.ai_utils.tiktoken") - def test_local_providers_use_default_encoding(self, mock_tiktoken): - """Test that local providers (ollama, lm-studio, custom-openai, custom-anthropic) use default encoding without network calls.""" - import gac.constants - - # Clear the lru_cache - ai_utils.get_encoding.cache_clear() - - mock_encoding = MagicMock() - mock_encoding.encode.return_value = [1, 2, 3, 4] # 4 tokens for "Hello, world!" - mock_tiktoken.get_encoding.return_value = mock_encoding - + """Test character-based token counting functionality.""" + # Test with string content - "Hello, world!" = 13 chars + # 13 / 3.4 = 3.82, rounded = 4 tokens text = "Hello, world!" 
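+        # Note: the character-based implementation never reads the model
+        # argument (it stays in the signature, presumably for API
+        # compatibility), so any "provider:model" string gives the same count.
+        assert ai_utils.count_tokens(text, "anthropic:claude-3") == ai_utils.count_tokens(text, "openai:gpt-4")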
- - # Test ollama provider - token_count = ai_utils.count_tokens(text, "ollama:llama2") - assert token_count == 4 - mock_tiktoken.get_encoding.assert_called_with(gac.constants.Utility.DEFAULT_ENCODING) - mock_tiktoken.encoding_for_model.assert_not_called() - - # Reset mock - mock_tiktoken.reset_mock() - ai_utils.get_encoding.cache_clear() - - # Test lm-studio provider - token_count = ai_utils.count_tokens(text, "lm-studio:local-model") - assert token_count == 4 - mock_tiktoken.get_encoding.assert_called_with(gac.constants.Utility.DEFAULT_ENCODING) - mock_tiktoken.encoding_for_model.assert_not_called() - - # Reset mock - mock_tiktoken.reset_mock() - ai_utils.get_encoding.cache_clear() - - # Test custom-openai provider - token_count = ai_utils.count_tokens(text, "custom-openai:local-gpt4") - assert token_count == 4 - mock_tiktoken.get_encoding.assert_called_with(gac.constants.Utility.DEFAULT_ENCODING) - mock_tiktoken.encoding_for_model.assert_not_called() - - # Reset mock - mock_tiktoken.reset_mock() - ai_utils.get_encoding.cache_clear() - - # Test custom-anthropic provider - token_count = ai_utils.count_tokens(text, "custom-anthropic:local-claude") - assert token_count == 4 - mock_tiktoken.get_encoding.assert_called_with(gac.constants.Utility.DEFAULT_ENCODING) - mock_tiktoken.encoding_for_model.assert_not_called() - - @patch("gac.ai_utils.tiktoken") - def test_cloud_providers_use_model_specific_encoding(self, mock_tiktoken): - """Test that cloud providers try to use model-specific encoding first.""" - # Clear the lru_cache - ai_utils.get_encoding.cache_clear() - - mock_encoding = MagicMock() - mock_encoding.encode.return_value = [1, 2, 3, 4] # 4 tokens for "Hello, world!" - mock_tiktoken.encoding_for_model.return_value = mock_encoding - - text = "Hello, world!" - - # Test openai provider token_count = ai_utils.count_tokens(text, "openai:gpt-4") - assert token_count == 4 - mock_tiktoken.encoding_for_model.assert_called_with("gpt-4") - mock_tiktoken.get_encoding.assert_not_called() + assert token_count == 4 # Should return calculated token count + assert isinstance(token_count, int) - @patch("gac.ai_utils.tiktoken") - def test_fallback_to_default_encoding_on_error(self, mock_tiktoken): - """Test fallback to default encoding when model-specific encoding fails.""" - import gac.constants - - # Clear the lru_cache - ai_utils.get_encoding.cache_clear() - - mock_encoding = MagicMock() - mock_encoding.encode.return_value = [1, 2, 3, 4] # 4 tokens for "Hello, world!" - mock_tiktoken.encoding_for_model.side_effect = ConnectionError("Network error") - mock_tiktoken.get_encoding.return_value = mock_encoding - - text = "Hello, world!" 
-
-        # Test with cloud provider that fails
+        # Test different text length
+        text = "This is a test message"  # 22 chars
+        # 22 / 3.4 = 6.47, rounded = 6 tokens
         token_count = ai_utils.count_tokens(text, "openai:gpt-4")
-        assert token_count == 4
-        mock_tiktoken.encoding_for_model.assert_called_with("gpt-4")
-        mock_tiktoken.get_encoding.assert_called_with(gac.constants.Utility.DEFAULT_ENCODING)
+        assert token_count == 6

-    def test_no_tiktoken_mode_skips_tiktoken(self, monkeypatch):
-        """Ensure rough token counting mode bypasses tiktoken entirely."""
-        monkeypatch.setenv("GAC_NO_TIKTOKEN", "true")
-        ai_utils._should_skip_tiktoken_counting.cache_clear()
+        # Test with list format
+        messages = [{"role": "user", "content": "Hello"}]  # 5 chars
+        # 5 / 3.4 = 1.47, rounded = 1 token
+        token_count = ai_utils.count_tokens(messages, "openai:gpt-4")
+        assert token_count == 1

-        sample_text = "offline token counting"
+        # Test empty string vs whitespace-only string
+        assert ai_utils.count_tokens("", "openai:gpt-4") == 0
+        assert ai_utils.count_tokens("   ", "openai:gpt-4") == 1  # 3 spaces = 3/3.4 = 0.88, rounded = 1

-        with patch("gac.ai_utils.get_encoding") as mock_get_encoding:
-            tokens = ai_utils.count_tokens(sample_text, "openai:gpt-4")
+    def test_count_tokens_empty_content(self):
+        """Test token counting with empty content."""
+        assert ai_utils.count_tokens("", "openai:gpt-4") == 0
+        assert ai_utils.count_tokens([], "openai:gpt-4") == 0
+        assert ai_utils.count_tokens({}, "openai:gpt-4") == 0

-        assert tokens == len(sample_text) // 4
-        mock_get_encoding.assert_not_called()
+    def test_all_providers_use_same_character_based_counting(self):
+        """Test that all providers use the same character-based counting."""
+        text = "Hello, world!"  # 13 chars
+        # 13 / 3.4 = 3.82, rounded = 4 tokens
+        expected_tokens = round(len(text) / 3.4)
+
+        # Test various providers - all should give the same result
+        providers_and_models = [
+            "openai:gpt-4",
+            "anthropic:claude-3",
+            "ollama:llama2",
+            "lm-studio:local-model",
+            "custom-openai:local-gpt4",
+            "custom-anthropic:local-claude",
+            "groq:llama3-70b",
+            "gemini:gemini-pro",
+        ]
+
+        for model in providers_and_models:
+            token_count = ai_utils.count_tokens(text, model)
+            assert token_count == expected_tokens, (
+                f"Provider {model} should give {expected_tokens} tokens, got {token_count}"
+            )
+
+    def test_character_based_calculation_examples(self):
+        """Test specific examples of character-based token calculation."""
+        test_cases = [
+            ("Hello", 1),  # 5 chars / 3.4 = 1.47 -> 1 token
+            ("Hello world", 3),  # 11 chars / 3.4 = 3.24 -> 3 tokens
+            ("This is a test", 4),  # 14 chars / 3.4 = 4.12 -> 4 tokens
+            ("", 0),  # Empty string = 0 tokens
+            ("a", 1),  # 1 char / 3.4 = 0.29 -> rounded = 0, but we force 1 for non-empty
+        ]
+
+        for text, expected_tokens in test_cases:
+            token_count = ai_utils.count_tokens(text, "openai:gpt-4")
+            assert token_count == expected_tokens, (
+                f"Text '{text}' should give {expected_tokens} tokens, got {token_count}"
+            )
-
+    def test_character_based_calculation_edge_cases(self):
+        """Test edge cases for character-based token calculation."""
+        # Test very short text
+        assert ai_utils.count_tokens("a", "openai:gpt-4") == 1  # 1 char, forced to 1 token
+
+        # Test long text
+        long_text = "a" * 100  # 100 chars
+        # 100 / 3.4 = 29.41 -> 29 tokens
+        expected = round(100 / 3.4)
+        assert ai_utils.count_tokens(long_text, "openai:gpt-4") == expected
+
+        # Test with spaces
and newlines + text_with_spaces = "Hello \n\n world" # 14 chars including spaces and newlines + expected = round(14 / 3.4) # = 4 tokens + actual = ai_utils.count_tokens(text_with_spaces, "openai:gpt-4") + assert actual == expected, f"Expected {expected}, got {actual}" + + def test_character_based_calculation_accuracy(self): + """Test that character-based calculation gives reasonable results.""" + # Test that our calculation gives consistent results + text = "The quick brown fox jumps over the lazy dog" + token_count = ai_utils.count_tokens(text, "any:model") + + # Should be the same as the direct calculation + expected = round(len(text) / 3.4) + assert token_count == expected + + # Should be reasonable for this text (not way off) + assert 10 <= token_count <= 15 # Reasonable range for this sentence + + # Test that different content lengths scale appropriately + short_text = "Hi" + medium_text = "This is a medium length message" + long_text = short_text * 50 + + short_tokens = ai_utils.count_tokens(short_text, "any:model") + medium_tokens = ai_utils.count_tokens(medium_text, "any:model") + long_tokens = ai_utils.count_tokens(long_text, "any:model") + + assert short_tokens < medium_tokens < long_tokens class TestAIError: diff --git a/tests/test_ai_utils_additional.py b/tests/test_ai_utils_additional.py index a94a022..5d599ac 100644 --- a/tests/test_ai_utils_additional.py +++ b/tests/test_ai_utils_additional.py @@ -4,14 +4,11 @@ from unittest import mock import pytest -import tiktoken from gac.ai_utils import ( - _should_skip_tiktoken_counting, count_tokens, extract_text_content, generate_with_retries, - get_encoding, ) from gac.errors import AIError @@ -50,6 +47,23 @@ def test_count_tokens_invalid_dict(self): result = count_tokens(content, "test-model") assert result == 0 + def test_count_tokens_character_based_math(self): + """Test the exact math of character-based token counting.""" + # Test specific lengths and their expected outputs + test_cases = [ + ("", 0), # Empty = 0 + ("a", 1), # 1 char = max(1, round(1/3.4)) = 1 + ("ab", 1), # 2 chars = round(2/3.4) = 1 + ("abc", 1), # 3 chars = round(3/3.4) = 1 + ("abcd", 1), # 4 chars = round(4/3.4) = 1 + ("abcde", 1), # 5 chars = round(5/3.4) = 1 + ("abcdef", 2), # 6 chars = round(6/3.4) = 2 + ] + + for text, expected in test_cases: + result = count_tokens(text, "any:model") + assert result == expected, f"Text '{text}' (len={len(text)}) expected {expected}, got {result}" + def test_extract_text_content_string(self): """Test extract_text_content with string.""" result = extract_text_content("Hello world") @@ -86,52 +100,59 @@ def test_extract_text_content_invalid_dict(self): result = extract_text_content(content) assert result == "" - def test_get_encoding_keyerror_fallback(self): - """Test get_encoding with KeyError fallback (line 138, 142).""" - with mock.patch("tiktoken.encoding_for_model", side_effect=KeyError("Model not found")): - result = get_encoding("openai:unknown-model") - assert isinstance(result, tiktoken.Encoding) - - def test_get_encoding_oserror_fallback(self): - """Test get_encoding with OSError fallback (line 142).""" - with mock.patch("tiktoken.encoding_for_model", side_effect=OSError("Network error")): - result = get_encoding("openai:gpt-4o") - assert isinstance(result, tiktoken.Encoding) - - def test_get_encoding_connection_error_fallback(self): - """Test get_encoding with ConnectionError fallback.""" - with mock.patch("tiktoken.encoding_for_model", side_effect=ConnectionError("Connection failed")): - result = 
get_encoding("openai:gpt-4o") - assert isinstance(result, tiktoken.Encoding) - - def test_get_encoding_non_openai_provider(self): - """Test get_encoding with non-OpenAI provider.""" - result = get_encoding("anthropic:claude-3") - assert isinstance(result, tiktoken.Encoding) - - def test_should_skip_tiktoken_counting_env_true(self): - """Test _should_skip_tiktoken_counting with GAC_NO_TIKTOKEN=true.""" + def test_character_based_no_external_dependencies(self): + """Test that character-based counting has no external dependencies.""" + # This test ensures we don't import tiktoken or make any network calls + text = "Simple test text" + expected = round(len(text) / 3.4) + + # Should work regardless of environment + result = count_tokens(text, "any:provider-model") + assert result == expected + + # Should work for any provider (no provider-specific logic) + providers = ["openai", "anthropic", "groq", "gemini", "ollama"] + for provider in providers: + result = count_tokens(text, f"{provider}:some-model") + assert result == expected + assert isinstance(result, int) # Always returns an integer token count + + def test_character_based_no_model_specific_logic(self): + """Test that character-based counting has no model-specific logic.""" + # Since we removed get_encoding, we just test that all models work the same + text = "Test message" + expected = round(len(text) / 3.4) + + # All models should give the same result regardless of provider + models = ["openai:gpt-4", "anthropic:claude-3", "groq:llama3-70b", "gemini:gemini-pro", "ollama:llama2"] + + for model in models: + result = count_tokens(text, model) + assert result == expected, f"Model {model} should give {expected} tokens, got {result}" + + def test_character_based_token_counting_no_env_dependency(self): + """Test that character-based token counting doesn't depend on any environment variables.""" + # Test that the function works the same regardless of environment variables + text = "Hello world" + expected = round(len(text) / 3.4) + + # Test with no env var set + actual = count_tokens(text, "openai:gpt-4") + assert actual == expected + + # Test with various env vars that used to affect tiktoken behavior with mock.patch.dict(os.environ, {"GAC_NO_TIKTOKEN": "true"}): - # Clear the cache to pick up the new environment variable - _should_skip_tiktoken_counting.cache_clear() - result = _should_skip_tiktoken_counting() - assert result is True + actual = count_tokens(text, "openai:gpt-4") + assert actual == expected - def test_should_skip_tiktoken_counting_env_false(self): - """Test _should_skip_tiktoken_counting with GAC_NO_TIKTOKEN=false.""" with mock.patch.dict(os.environ, {"GAC_NO_TIKTOKEN": "false"}): - # Clear the cache to pick up the new environment variable - _should_skip_tiktoken_counting.cache_clear() - result = _should_skip_tiktoken_counting() - assert result is False - - def test_should_skip_tiktoken_counting_default(self): - """Test _should_skip_tiktoken_counting with default.""" - with mock.patch.dict(os.environ, {}, clear=True): - # Clear the cache to pick up the new environment variable - _should_skip_tiktoken_counting.cache_clear() - result = _should_skip_tiktoken_counting() - assert result is False # Default should be False + actual = count_tokens(text, "openai:gpt-4") + assert actual == expected + + # Test with some random env var that shouldn't affect anything + with mock.patch.dict(os.environ, {"RANDOM_VAR": "some_value"}): + actual = count_tokens(text, "openai:gpt-4") + assert actual == expected def 
test_generate_with_retries_invalid_model_format(self): """Test generate_with_retries with invalid model format.""" @@ -368,31 +389,59 @@ def always_error_provider(**kwargs): ) assert "Failed to generate commit message after 2 retries" in str(exc_info.value) - def test_count_tokens_tiktoken_error_fallback(self): - """Test count_tokens falls back when tiktoken fails.""" - with mock.patch("gac.ai_utils.get_encoding", side_effect=KeyError("Encoding not found")): - with mock.patch("gac.ai_utils._should_skip_tiktoken_counting", return_value=False): - result = count_tokens("Hello world", "test-model") - # Should fallback to rough estimation: len(text) // 4 - assert result == len("Hello world") // 4 - - def test_count_tokens_unicode_error_fallback(self): - """Test count_tokens falls back on UnicodeError.""" - with mock.patch("tiktoken.encoding_for_model", side_effect=UnicodeError("Unicode error")): - with mock.patch("gac.ai_utils._should_skip_tiktoken_counting", return_value=False): - result = count_tokens("Hello world", "test-model") - assert result == len("Hello world") // 4 - - def test_count_tokens_value_error_fallback(self): - """Test count_tokens falls back on ValueError.""" - with mock.patch("tiktoken.encoding_for_model", side_effect=ValueError("Value error")): - with mock.patch("gac.ai_utils._should_skip_tiktoken_counting", return_value=False): - result = count_tokens("Hello world", "test-model") - assert result == len("Hello world") // 4 - - def test_count_tokens_skip_tiktoken(self): - """Test count_tokens when tiktoken counting is skipped.""" - with mock.patch("gac.ai_utils._should_skip_tiktoken_counting", return_value=True): - result = count_tokens("Hello world", "test-model") - # Should use rough estimation - assert result == len("Hello world") // 4 + def test_character_based_no_fallback_needed(self): + """Test that character-based counting never needs fallback.""" + # Since we removed tiktoken, there's no fallback needed + # Just test that it works consistently + text = "Hello world" + expected = round(len(text) / 3.4) + + result = count_tokens(text, "test-model") + assert result == expected + + # Test with empty text + assert count_tokens("", "test-model") == 0 + + # Test that it works for any model (no model-specific logic) + models = ["openai:gpt-4", "anthropic:claude-3", "ollama:llama2"] + for model in models: + result = count_tokens(text, model) + assert result == expected + + def test_character_based_works_with_unicode(self): + """Test that character-based counting works with Unicode text.""" + unicode_text = "Hello 🌍 世界! 
🐍" # Mix of ASCII and emoji + expected = round(len(unicode_text) / 3.4) + result = count_tokens(unicode_text, "test-model") + assert result == expected + + def test_character_based_always_consistent(self): + """Test that character-based counting is always consistent.""" + text = "Test message" + expected = round(len(text) / 3.4) + + # Should always give the same result - no errors or fallbacks needed + multiple_calls = [count_tokens(text, "test-model") for _ in range(5)] + assert all(call == expected for call in multiple_calls) + + def test_character_based_no_errors(self): + """Test that character-based counting never raises exceptions.""" + # Various inputs that should never cause errors + test_cases = [ + "", + "Simple text", + "Text with unicode: café", + "Emoji: 🎉🚀", + "Newlines\nand\ttabs", + "A" * 1000, # Long text + ] + + for text in test_cases: + result = count_tokens(text, "any:model") + assert isinstance(result, int) + assert result >= 0 + # Should use character-based calculation + expected = round(len(text) / 3.4) + if text and expected == 0: + expected = 1 # Ensure at least 1 token for non-empty text + assert result == expected, f"Text '{text[:20]}...' should give {expected} tokens, got {result}" diff --git a/tests/test_ai_utils_extended.py b/tests/test_ai_utils_extended.py index 7efcc5b..a0993b3 100644 --- a/tests/test_ai_utils_extended.py +++ b/tests/test_ai_utils_extended.py @@ -1,18 +1,14 @@ """Extended tests for ai_utils.py to improve coverage from 56% to 90%+.""" -import os from unittest.mock import MagicMock, patch import pytest from gac.ai_utils import ( - _should_skip_tiktoken_counting, count_tokens, extract_text_content, generate_with_retries, - get_encoding, ) -from gac.constants import Utility from gac.errors import AIError @@ -61,122 +57,57 @@ def test_extract_from_list_empty_list(self): assert result == "" -class TestGetEncodingExtended: - """Test get_encoding function with various scenarios.""" - - @patch("tiktoken.get_encoding") - @patch("tiktoken.encoding_for_model") - def test_provider_not_openai_returns_default(self, mock_encoding_for_model, mock_get_encoding): - """Test non-OpenAI provider returns default encoding (line 76).""" - mock_get_encoding.return_value = "default_encoding" - result = get_encoding("anthropic:claude-3-sonnet") - mock_get_encoding.assert_called_once_with(Utility.DEFAULT_ENCODING) - assert result == "default_encoding" - - @patch("tiktoken.encoding_for_model") - @patch("tiktoken.get_encoding") - def test_openai_model_encoding_success(self, mock_get_encoding, mock_encoding_for_model): - """Test successful OpenAI model encoding retrieval.""" - mock_encoding = MagicMock() - mock_encoding_for_model.return_value = mock_encoding - - result = get_encoding("openai:gpt-5-nano") - assert result == mock_encoding - mock_encoding_for_model.assert_called_once_with("gpt-5-nano") - - @patch("tiktoken.encoding_for_model") - @patch("tiktoken.get_encoding") - def test_openai_model_key_error_fallback(self, mock_get_encoding, mock_encoding_for_model): - """Test KeyError fallback to default encoding.""" - mock_encoding_for_model.side_effect = KeyError("Model not found") - mock_get_encoding.return_value = "default_encoding" - - result = get_encoding("openai:unknown-model") - assert result == "default_encoding" - mock_get_encoding.assert_called_once_with(Utility.DEFAULT_ENCODING) - - @patch("tiktoken.encoding_for_model") - @patch("tiktoken.get_encoding") - def test_openai_model_connection_error_fallback(self, mock_get_encoding, mock_encoding_for_model): - 
"""Test ConnectionError fallback to default encoding.""" - mock_encoding_for_model.side_effect = ConnectionError("Network error") - mock_get_encoding.return_value = "default_encoding" - - result = get_encoding("openai:gpt-5-nano") - assert result == "default_encoding" - mock_get_encoding.assert_called_once_with(Utility.DEFAULT_ENCODING) - - @patch("gac.ai_utils.tiktoken.get_encoding") - def test_openai_model_os_error_fallback(self, mock_get_encoding): - """Test OSError fallback to default encoding.""" - # Clear cache to ensure clean test - get_encoding.cache_clear() - - # Remove patches that interfere with each other and simplify - with patch("gac.ai_utils.tiktoken.encoding_for_model", side_effect=OSError("SSL error")): - mock_get_encoding.return_value = "default_encoding" - - result = get_encoding("openai:gpt-5-nano") - assert result == "default_encoding" - # The fallback should be called - mock_get_encoding.assert_called_once_with(Utility.DEFAULT_ENCODING) - - -class TestCountTokensExtended: - """Test count_tokens function with various edge cases.""" - - @patch("gac.ai_utils.get_encoding") - @patch("gac.ai_utils.extract_text_content") - def test_count_tokens_key_error_fallback(self, mock_extract, mock_encoding): - """Test KeyError during encoding (lines 47-50).""" - mock_extract.return_value = "sample text" - mock_encoding.side_effect = KeyError("Unknown model") - - result = count_tokens("sample text", "unknown:model") - assert result == len("sample text") // 4 # Fallback estimation - - @patch("gac.ai_utils.get_encoding") - @patch("gac.ai_utils.extract_text_content") - def test_count_tokens_unicode_error_fallback(self, mock_extract, mock_encoding): - """Test UnicodeError during encoding (lines 47-50).""" - mock_extract.return_value = "sample text" - mock_encoding.side_effect = UnicodeError("Encoding error") - - result = count_tokens("sample text", "test:model") - assert result == len("sample text") // 4 # Fallback estimation - - @patch("gac.ai_utils.get_encoding") - @patch("gac.ai_utils.extract_text_content") - def test_count_tokens_value_error_fallback(self, mock_extract, mock_encoding): - """Test ValueError during encoding (lines 47-50).""" - mock_extract.return_value = "sample text" - mock_encoding.side_effect = ValueError("Invalid input") - - result = count_tokens("sample text", "test:model") - assert result == len("sample text") // 4 # Fallback estimation - - def test_count_tokens_empty_content_with_skip_tiktoken(self): - """Test empty content returns 0 even in skip mode.""" - with patch.dict(os.environ, {"GAC_NO_TIKTOKEN": "true"}): - # Clear the cache to ensure fresh check - _should_skip_tiktoken_counting.cache_clear() - - result = count_tokens("", "any:model") - assert result == 0 - - def test_should_skip_tiktoken_counting_false(self): - """Test that tiktoken is not skipped by default.""" - with patch.dict(os.environ, {"GAC_NO_TIKTOKEN": "false"}, clear=True): - _should_skip_tiktoken_counting.cache_clear() - result = _should_skip_tiktoken_counting() - assert result is False - - def test_should_skip_tiktoken_counting_true_from_env(self): - """Test tiktoken skip from environment variable.""" - with patch.dict(os.environ, {"GAC_NO_TIKTOKEN": "true"}): - _should_skip_tiktoken_counting.cache_clear() - result = _should_skip_tiktoken_counting() - assert result is True +class TestCharacterBasedCountingExtended: + """Test character-based counting function with various scenarios.""" + + def test_all_providers_same_result(self): + """Test that all providers provide the same result (no 
provider-specific logic)."""
+        text = "Sample test message"
+        expected = round(len(text) / 3.4)
+
+        providers = [
+            "openai:gpt-4",
+            "anthropic:claude-3",
+            "groq:llama3-70b",
+            "gemini:gemini-pro",
+            "ollama:llama2",
+            "custom-openai:local-model",
+            "lm-studio:local-model",
+        ]
+
+        for provider in providers:
+            result = count_tokens(text, provider)
+            assert result == expected, f"Provider {provider} should give {expected}, got {result}"
+
+    def test_various_text_lengths(self):
+        """Test token counting with various text lengths."""
+        test_cases = [
+            ("", 0),  # Empty
+            ("a", 1),  # Single char
+            ("Hello", 1),  # Word
+            ("Hello world", 3),  # Sentence
+            ("This is a longer sentence with multiple words.", 14),  # Longer text
+        ]
+
+        for text, expected in test_cases:
+            result = count_tokens(text, "any:model")
+            assert result == expected, f"Text '{text}' should give {expected} tokens, got {result}"
+
+    def test_unicode_and_special_characters(self):
+        """Test token counting with Unicode and special characters."""
+        test_cases = [
+            ("café", 1),  # Unicode characters: 4 chars -> round(4 / 3.4) = 1
+            ("🤖🚀", 1),  # Emoji: 2 code points -> round(2 / 3.4) = 1
+            ("\n\t\r", 1),  # Control characters: 3 chars -> round(3 / 3.4) = 1
+            ("¡Hola! ¿Cómo estás?", 6),  # Accented characters: 19 chars -> round(19 / 3.4) = 6
+        ]
+
+        for text, expected in test_cases:
+            result = count_tokens(text, "any:model")
+            assert result == expected, f"Text '{text}' should give {expected} tokens, got {result}"


 class TestGenerateWithRetriesExtended:
diff --git a/tests/test_config.py b/tests/test_config.py
index 2f3a252..7d2c735 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -19,14 +19,12 @@ def test_load_config_env(tmp_path, monkeypatch):
     monkeypatch.setenv("GAC_MAX_OUTPUT_TOKENS", "1234")
     monkeypatch.setenv("GAC_RETRIES", "7")
     monkeypatch.setenv("GAC_LOG_LEVEL", "DEBUG")
-    monkeypatch.setenv("GAC_NO_TIKTOKEN", "true")
     config = load_config()
     assert config["model"] == "env-model"
     assert config["temperature"] == 0.5
     assert config["max_output_tokens"] == 1234
     assert config["max_retries"] == 7
     assert config["log_level"] == "DEBUG"
-    assert config["no_tiktoken"] is True


 def test_load_config_project_gac_env(tmp_path, monkeypatch):
diff --git a/tests/test_constants.py b/tests/test_constants.py
index 43aded2..7c4f477 100644
--- a/tests/test_constants.py
+++ b/tests/test_constants.py
@@ -35,9 +35,12 @@ def test_logging_constants(self):
         assert "ERROR" in Logging.LEVELS
         assert len(Logging.LEVELS) == 4  # Ensure no unexpected levels

-    def test_encoding_constants(self):
-        """Test encoding constants."""
-        assert Utility.DEFAULT_ENCODING == "cl100k_base"  # Verify base encoding for tokenization
+    def test_token_constants(self):
+        """Test token-related constants."""
+        # Since we removed tiktoken, we don't have encoding constants anymore
+        # Just test that Utility exists and has expected attributes
+        assert hasattr(Utility, "DEFAULT_DIFF_TOKEN_LIMIT")
+        assert Utility.DEFAULT_DIFF_TOKEN_LIMIT > 0

     def test_languages_code_map(self):
         """Test the language code mapping dictionary."""
diff --git a/uv.lock b/uv.lock
index 7a2da9b..088471a 100644
--- a/uv.lock
+++ b/uv.lock
@@ -430,7 +430,6 @@ dependencies = [
     { name = "python-dotenv" },
     { name = "questionary" },
     { name = "rich" },
-    { name = "tiktoken" },
 ]

 [package.optional-dependencies]
@@ -466,7 +465,6 @@ requires-dist = [
     { name = "questionary" },
     { name = "rich", specifier = ">=14.1.0" },
     { name = "ruff", marker = "extra == 'dev'" },
-    { name = "tiktoken", specifier = ">=0.12.0" },
     { name = "twine", marker = "extra == 'dev'" },
 ]
provides-extras = ["dev"] @@ -1101,113 +1099,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310 }, ] -[[package]] -name = "regex" -version = "2025.11.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cc/a9/546676f25e573a4cf00fe8e119b78a37b6a8fe2dc95cda877b30889c9c45/regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01", size = 414669 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/d6/d788d52da01280a30a3f6268aef2aa71043bff359c618fea4c5b536654d5/regex-2025.11.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b441a4ae2c8049106e8b39973bfbddfb25a179dda2bdb99b0eeb60c40a6a3af", size = 488087 }, - { url = "https://files.pythonhosted.org/packages/69/39/abec3bd688ec9bbea3562de0fd764ff802976185f5ff22807bf0a2697992/regex-2025.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2fa2eed3f76677777345d2f81ee89f5de2f5745910e805f7af7386a920fa7313", size = 290544 }, - { url = "https://files.pythonhosted.org/packages/39/b3/9a231475d5653e60002508f41205c61684bb2ffbf2401351ae2186897fc4/regex-2025.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d8b4a27eebd684319bdf473d39f1d79eed36bf2cd34bd4465cdb4618d82b3d56", size = 288408 }, - { url = "https://files.pythonhosted.org/packages/c3/c5/1929a0491bd5ac2d1539a866768b88965fa8c405f3e16a8cef84313098d6/regex-2025.11.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cf77eac15bd264986c4a2c63353212c095b40f3affb2bc6b4ef80c4776c1a28", size = 781584 }, - { url = "https://files.pythonhosted.org/packages/ce/fd/16aa16cf5d497ef727ec966f74164fbe75d6516d3d58ac9aa989bc9cdaad/regex-2025.11.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b7f9ee819f94c6abfa56ec7b1dbab586f41ebbdc0a57e6524bd5e7f487a878c7", size = 850733 }, - { url = "https://files.pythonhosted.org/packages/e6/49/3294b988855a221cb6565189edf5dc43239957427df2d81d4a6b15244f64/regex-2025.11.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:838441333bc90b829406d4a03cb4b8bf7656231b84358628b0406d803931ef32", size = 898691 }, - { url = "https://files.pythonhosted.org/packages/14/62/b56d29e70b03666193369bdbdedfdc23946dbe9f81dd78ce262c74d988ab/regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cfe6d3f0c9e3b7e8c0c694b24d25e677776f5ca26dce46fd6b0489f9c8339391", size = 791662 }, - { url = "https://files.pythonhosted.org/packages/15/fc/e4c31d061eced63fbf1ce9d853975f912c61a7d406ea14eda2dd355f48e7/regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2ab815eb8a96379a27c3b6157fcb127c8f59c36f043c1678110cea492868f1d5", size = 782587 }, - { url = "https://files.pythonhosted.org/packages/b2/bb/5e30c7394bcf63f0537121c23e796be67b55a8847c3956ae6068f4c70702/regex-2025.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:728a9d2d173a65b62bdc380b7932dd8e74ed4295279a8fe1021204ce210803e7", size = 774709 }, - { url = "https://files.pythonhosted.org/packages/c5/c4/fce773710af81b0cb37cb4ff0947e75d5d17dee304b93d940b87a67fc2f4/regex-2025.11.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:509dc827f89c15c66a0c216331260d777dd6c81e9a4e4f830e662b0bb296c313", size = 845773 }, - { url = "https://files.pythonhosted.org/packages/7b/5e/9466a7ec4b8ec282077095c6eb50a12a389d2e036581134d4919e8ca518c/regex-2025.11.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:849202cd789e5f3cf5dcc7822c34b502181b4824a65ff20ce82da5524e45e8e9", size = 836164 }, - { url = "https://files.pythonhosted.org/packages/95/18/82980a60e8ed1594eb3c89eb814fb276ef51b9af7caeab1340bfd8564af6/regex-2025.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b6f78f98741dcc89607c16b1e9426ee46ce4bf31ac5e6b0d40e81c89f3481ea5", size = 779832 }, - { url = "https://files.pythonhosted.org/packages/03/cc/90ab0fdbe6dce064a42015433f9152710139fb04a8b81b4fb57a1cb63ffa/regex-2025.11.3-cp310-cp310-win32.whl", hash = "sha256:149eb0bba95231fb4f6d37c8f760ec9fa6fabf65bab555e128dde5f2475193ec", size = 265802 }, - { url = "https://files.pythonhosted.org/packages/34/9d/e9e8493a85f3b1ddc4a5014465f5c2b78c3ea1cbf238dcfde78956378041/regex-2025.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:ee3a83ce492074c35a74cc76cf8235d49e77b757193a5365ff86e3f2f93db9fd", size = 277722 }, - { url = "https://files.pythonhosted.org/packages/15/c4/b54b24f553966564506dbf873a3e080aef47b356a3b39b5d5aba992b50db/regex-2025.11.3-cp310-cp310-win_arm64.whl", hash = "sha256:38af559ad934a7b35147716655d4a2f79fcef2d695ddfe06a06ba40ae631fa7e", size = 270289 }, - { url = "https://files.pythonhosted.org/packages/f7/90/4fb5056e5f03a7048abd2b11f598d464f0c167de4f2a51aa868c376b8c70/regex-2025.11.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eadade04221641516fa25139273505a1c19f9bf97589a05bc4cfcd8b4a618031", size = 488081 }, - { url = "https://files.pythonhosted.org/packages/85/23/63e481293fac8b069d84fba0299b6666df720d875110efd0338406b5d360/regex-2025.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feff9e54ec0dd3833d659257f5c3f5322a12eee58ffa360984b716f8b92983f4", size = 290554 }, - { url = "https://files.pythonhosted.org/packages/2b/9d/b101d0262ea293a0066b4522dfb722eb6a8785a8c3e084396a5f2c431a46/regex-2025.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3b30bc921d50365775c09a7ed446359e5c0179e9e2512beec4a60cbcef6ddd50", size = 288407 }, - { url = "https://files.pythonhosted.org/packages/0c/64/79241c8209d5b7e00577ec9dca35cd493cc6be35b7d147eda367d6179f6d/regex-2025.11.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f99be08cfead2020c7ca6e396c13543baea32343b7a9a5780c462e323bd8872f", size = 793418 }, - { url = "https://files.pythonhosted.org/packages/3d/e2/23cd5d3573901ce8f9757c92ca4db4d09600b865919b6d3e7f69f03b1afd/regex-2025.11.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6dd329a1b61c0ee95ba95385fb0c07ea0d3fe1a21e1349fa2bec272636217118", size = 860448 }, - { url = "https://files.pythonhosted.org/packages/2a/4c/aecf31beeaa416d0ae4ecb852148d38db35391aac19c687b5d56aedf3a8b/regex-2025.11.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4c5238d32f3c5269d9e87be0cf096437b7622b6920f5eac4fd202468aaeb34d2", size = 907139 }, - { url = "https://files.pythonhosted.org/packages/61/22/b8cb00df7d2b5e0875f60628594d44dba283e951b1ae17c12f99e332cc0a/regex-2025.11.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10483eefbfb0adb18ee9474498c9a32fcf4e594fbca0543bb94c48bac6183e2e", size = 800439 }, - { url = 
"https://files.pythonhosted.org/packages/02/a8/c4b20330a5cdc7a8eb265f9ce593f389a6a88a0c5f280cf4d978f33966bc/regex-2025.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:78c2d02bb6e1da0720eedc0bad578049cad3f71050ef8cd065ecc87691bed2b0", size = 782965 }, - { url = "https://files.pythonhosted.org/packages/b4/4c/ae3e52988ae74af4b04d2af32fee4e8077f26e51b62ec2d12d246876bea2/regex-2025.11.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b49cd2aad93a1790ce9cffb18964f6d3a4b0b3dbdbd5de094b65296fce6e58", size = 854398 }, - { url = "https://files.pythonhosted.org/packages/06/d1/a8b9cf45874eda14b2e275157ce3b304c87e10fb38d9fc26a6e14eb18227/regex-2025.11.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:885b26aa3ee56433b630502dc3d36ba78d186a00cc535d3806e6bfd9ed3c70ab", size = 845897 }, - { url = "https://files.pythonhosted.org/packages/ea/fe/1830eb0236be93d9b145e0bd8ab499f31602fe0999b1f19e99955aa8fe20/regex-2025.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd76a9f58e6a00f8772e72cff8ebcff78e022be95edf018766707c730593e1e", size = 788906 }, - { url = "https://files.pythonhosted.org/packages/66/47/dc2577c1f95f188c1e13e2e69d8825a5ac582ac709942f8a03af42ed6e93/regex-2025.11.3-cp311-cp311-win32.whl", hash = "sha256:3e816cc9aac1cd3cc9a4ec4d860f06d40f994b5c7b4d03b93345f44e08cc68bf", size = 265812 }, - { url = "https://files.pythonhosted.org/packages/50/1e/15f08b2f82a9bbb510621ec9042547b54d11e83cb620643ebb54e4eb7d71/regex-2025.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:087511f5c8b7dfbe3a03f5d5ad0c2a33861b1fc387f21f6f60825a44865a385a", size = 277737 }, - { url = "https://files.pythonhosted.org/packages/f4/fc/6500eb39f5f76c5e47a398df82e6b535a5e345f839581012a418b16f9cc3/regex-2025.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:1ff0d190c7f68ae7769cd0313fe45820ba07ffebfddfaa89cc1eb70827ba0ddc", size = 270290 }, - { url = "https://files.pythonhosted.org/packages/e8/74/18f04cb53e58e3fb107439699bd8375cf5a835eec81084e0bddbd122e4c2/regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41", size = 489312 }, - { url = "https://files.pythonhosted.org/packages/78/3f/37fcdd0d2b1e78909108a876580485ea37c91e1acf66d3bb8e736348f441/regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36", size = 291256 }, - { url = "https://files.pythonhosted.org/packages/bf/26/0a575f58eb23b7ebd67a45fccbc02ac030b737b896b7e7a909ffe43ffd6a/regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1", size = 288921 }, - { url = "https://files.pythonhosted.org/packages/ea/98/6a8dff667d1af907150432cf5abc05a17ccd32c72a3615410d5365ac167a/regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7", size = 798568 }, - { url = "https://files.pythonhosted.org/packages/64/15/92c1db4fa4e12733dd5a526c2dd2b6edcbfe13257e135fc0f6c57f34c173/regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69", size = 864165 }, - { url = "https://files.pythonhosted.org/packages/f9/e7/3ad7da8cdee1ce66c7cd37ab5ab05c463a86ffeb52b1a25fe7bd9293b36c/regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48", size = 912182 }, - { url = "https://files.pythonhosted.org/packages/84/bd/9ce9f629fcb714ffc2c3faf62b6766ecb7a585e1e885eb699bcf130a5209/regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c", size = 803501 }, - { url = "https://files.pythonhosted.org/packages/7c/0f/8dc2e4349d8e877283e6edd6c12bdcebc20f03744e86f197ab6e4492bf08/regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695", size = 787842 }, - { url = "https://files.pythonhosted.org/packages/f9/73/cff02702960bc185164d5619c0c62a2f598a6abff6695d391b096237d4ab/regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98", size = 858519 }, - { url = "https://files.pythonhosted.org/packages/61/83/0e8d1ae71e15bc1dc36231c90b46ee35f9d52fab2e226b0e039e7ea9c10a/regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74", size = 850611 }, - { url = "https://files.pythonhosted.org/packages/c8/f5/70a5cdd781dcfaa12556f2955bf170cd603cb1c96a1827479f8faea2df97/regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0", size = 789759 }, - { url = "https://files.pythonhosted.org/packages/59/9b/7c29be7903c318488983e7d97abcf8ebd3830e4c956c4c540005fcfb0462/regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204", size = 266194 }, - { url = "https://files.pythonhosted.org/packages/1a/67/3b92df89f179d7c367be654ab5626ae311cb28f7d5c237b6bb976cd5fbbb/regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9", size = 277069 }, - { url = "https://files.pythonhosted.org/packages/d7/55/85ba4c066fe5094d35b249c3ce8df0ba623cfd35afb22d6764f23a52a1c5/regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26", size = 270330 }, - { url = "https://files.pythonhosted.org/packages/e1/a7/dda24ebd49da46a197436ad96378f17df30ceb40e52e859fc42cac45b850/regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4", size = 489081 }, - { url = "https://files.pythonhosted.org/packages/19/22/af2dc751aacf88089836aa088a1a11c4f21a04707eb1b0478e8e8fb32847/regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76", size = 291123 }, - { url = "https://files.pythonhosted.org/packages/a3/88/1a3ea5672f4b0a84802ee9891b86743438e7c04eb0b8f8c4e16a42375327/regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a", size = 288814 }, - { url = "https://files.pythonhosted.org/packages/fb/8c/f5987895bf42b8ddeea1b315c9fedcfe07cadee28b9c98cf50d00adcb14d/regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361", size = 798592 }, - { url = 
"https://files.pythonhosted.org/packages/99/2a/6591ebeede78203fa77ee46a1c36649e02df9eaa77a033d1ccdf2fcd5d4e/regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160", size = 864122 }, - { url = "https://files.pythonhosted.org/packages/94/d6/be32a87cf28cf8ed064ff281cfbd49aefd90242a83e4b08b5a86b38e8eb4/regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe", size = 912272 }, - { url = "https://files.pythonhosted.org/packages/62/11/9bcef2d1445665b180ac7f230406ad80671f0fc2a6ffb93493b5dd8cd64c/regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850", size = 803497 }, - { url = "https://files.pythonhosted.org/packages/e5/a7/da0dc273d57f560399aa16d8a68ae7f9b57679476fc7ace46501d455fe84/regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc", size = 787892 }, - { url = "https://files.pythonhosted.org/packages/da/4b/732a0c5a9736a0b8d6d720d4945a2f1e6f38f87f48f3173559f53e8d5d82/regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9", size = 858462 }, - { url = "https://files.pythonhosted.org/packages/0c/f5/a2a03df27dc4c2d0c769220f5110ba8c4084b0bfa9ab0f9b4fcfa3d2b0fc/regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b", size = 850528 }, - { url = "https://files.pythonhosted.org/packages/d6/09/e1cd5bee3841c7f6eb37d95ca91cdee7100b8f88b81e41c2ef426910891a/regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7", size = 789866 }, - { url = "https://files.pythonhosted.org/packages/eb/51/702f5ea74e2a9c13d855a6a85b7f80c30f9e72a95493260193c07f3f8d74/regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c", size = 266189 }, - { url = "https://files.pythonhosted.org/packages/8b/00/6e29bb314e271a743170e53649db0fdb8e8ff0b64b4f425f5602f4eb9014/regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5", size = 277054 }, - { url = "https://files.pythonhosted.org/packages/25/f1/b156ff9f2ec9ac441710764dda95e4edaf5f36aca48246d1eea3f1fd96ec/regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467", size = 270325 }, - { url = "https://files.pythonhosted.org/packages/20/28/fd0c63357caefe5680b8ea052131acbd7f456893b69cc2a90cc3e0dc90d4/regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281", size = 491984 }, - { url = "https://files.pythonhosted.org/packages/df/ec/7014c15626ab46b902b3bcc4b28a7bae46d8f281fc7ea9c95e22fcaaa917/regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39", size = 292673 }, - { url = "https://files.pythonhosted.org/packages/23/ab/3b952ff7239f20d05f1f99e9e20188513905f218c81d52fb5e78d2bf7634/regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7", size = 291029 }, - { url = "https://files.pythonhosted.org/packages/21/7e/3dc2749fc684f455f162dcafb8a187b559e2614f3826877d3844a131f37b/regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed", size = 807437 }, - { url = "https://files.pythonhosted.org/packages/1b/0b/d529a85ab349c6a25d1ca783235b6e3eedf187247eab536797021f7126c6/regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19", size = 873368 }, - { url = "https://files.pythonhosted.org/packages/7d/18/2d868155f8c9e3e9d8f9e10c64e9a9f496bb8f7e037a88a8bed26b435af6/regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b", size = 914921 }, - { url = "https://files.pythonhosted.org/packages/2d/71/9d72ff0f354fa783fe2ba913c8734c3b433b86406117a8db4ea2bf1c7a2f/regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a", size = 812708 }, - { url = "https://files.pythonhosted.org/packages/e7/19/ce4bf7f5575c97f82b6e804ffb5c4e940c62609ab2a0d9538d47a7fdf7d4/regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6", size = 795472 }, - { url = "https://files.pythonhosted.org/packages/03/86/fd1063a176ffb7b2315f9a1b08d17b18118b28d9df163132615b835a26ee/regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce", size = 868341 }, - { url = "https://files.pythonhosted.org/packages/12/43/103fb2e9811205e7386366501bc866a164a0430c79dd59eac886a2822950/regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd", size = 854666 }, - { url = "https://files.pythonhosted.org/packages/7d/22/e392e53f3869b75804762c7c848bd2dd2abf2b70fb0e526f58724638bd35/regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2", size = 799473 }, - { url = "https://files.pythonhosted.org/packages/4f/f9/8bd6b656592f925b6845fcbb4d57603a3ac2fb2373344ffa1ed70aa6820a/regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a", size = 268792 }, - { url = "https://files.pythonhosted.org/packages/e5/87/0e7d603467775ff65cd2aeabf1b5b50cc1c3708556a8b849a2fa4dd1542b/regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c", size = 280214 }, - { url = "https://files.pythonhosted.org/packages/8d/d0/2afc6f8e94e2b64bfb738a7c2b6387ac1699f09f032d363ed9447fd2bb57/regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e", size = 271469 }, - { url = "https://files.pythonhosted.org/packages/31/e9/f6e13de7e0983837f7b6d238ad9458800a874bf37c264f7923e63409944c/regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6", size = 489089 }, - { url = 
"https://files.pythonhosted.org/packages/a3/5c/261f4a262f1fa65141c1b74b255988bd2fa020cc599e53b080667d591cfc/regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4", size = 291059 }, - { url = "https://files.pythonhosted.org/packages/8e/57/f14eeb7f072b0e9a5a090d1712741fd8f214ec193dba773cf5410108bb7d/regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73", size = 288900 }, - { url = "https://files.pythonhosted.org/packages/3c/6b/1d650c45e99a9b327586739d926a1cd4e94666b1bd4af90428b36af66dc7/regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f", size = 799010 }, - { url = "https://files.pythonhosted.org/packages/99/ee/d66dcbc6b628ce4e3f7f0cbbb84603aa2fc0ffc878babc857726b8aab2e9/regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d", size = 864893 }, - { url = "https://files.pythonhosted.org/packages/bf/2d/f238229f1caba7ac87a6c4153d79947fb0261415827ae0f77c304260c7d3/regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be", size = 911522 }, - { url = "https://files.pythonhosted.org/packages/bd/3d/22a4eaba214a917c80e04f6025d26143690f0419511e0116508e24b11c9b/regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db", size = 803272 }, - { url = "https://files.pythonhosted.org/packages/84/b1/03188f634a409353a84b5ef49754b97dbcc0c0f6fd6c8ede505a8960a0a4/regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62", size = 787958 }, - { url = "https://files.pythonhosted.org/packages/99/6a/27d072f7fbf6fadd59c64d210305e1ff865cc3b78b526fd147db768c553b/regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f", size = 859289 }, - { url = "https://files.pythonhosted.org/packages/9a/70/1b3878f648e0b6abe023172dacb02157e685564853cc363d9961bcccde4e/regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02", size = 850026 }, - { url = "https://files.pythonhosted.org/packages/dd/d5/68e25559b526b8baab8e66839304ede68ff6727237a47727d240006bd0ff/regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed", size = 789499 }, - { url = "https://files.pythonhosted.org/packages/fc/df/43971264857140a350910d4e33df725e8c94dd9dee8d2e4729fa0d63d49e/regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4", size = 271604 }, - { url = "https://files.pythonhosted.org/packages/01/6f/9711b57dc6894a55faf80a4c1b5aa4f8649805cb9c7aef46f7d27e2b9206/regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad", size = 280320 }, - { url = 
"https://files.pythonhosted.org/packages/f1/7e/f6eaa207d4377481f5e1775cdeb5a443b5a59b392d0065f3417d31d80f87/regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f", size = 273372 }, - { url = "https://files.pythonhosted.org/packages/c3/06/49b198550ee0f5e4184271cee87ba4dfd9692c91ec55289e6282f0f86ccf/regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc", size = 491985 }, - { url = "https://files.pythonhosted.org/packages/ce/bf/abdafade008f0b1c9da10d934034cb670432d6cf6cbe38bbb53a1cfd6cf8/regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49", size = 292669 }, - { url = "https://files.pythonhosted.org/packages/f9/ef/0c357bb8edbd2ad8e273fcb9e1761bc37b8acbc6e1be050bebd6475f19c1/regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536", size = 291030 }, - { url = "https://files.pythonhosted.org/packages/79/06/edbb67257596649b8fb088d6aeacbcb248ac195714b18a65e018bf4c0b50/regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95", size = 807674 }, - { url = "https://files.pythonhosted.org/packages/f4/d9/ad4deccfce0ea336296bd087f1a191543bb99ee1c53093dcd4c64d951d00/regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009", size = 873451 }, - { url = "https://files.pythonhosted.org/packages/13/75/a55a4724c56ef13e3e04acaab29df26582f6978c000ac9cd6810ad1f341f/regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9", size = 914980 }, - { url = "https://files.pythonhosted.org/packages/67/1e/a1657ee15bd9116f70d4a530c736983eed997b361e20ecd8f5ca3759d5c5/regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d", size = 812852 }, - { url = "https://files.pythonhosted.org/packages/b8/6f/f7516dde5506a588a561d296b2d0044839de06035bb486b326065b4c101e/regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6", size = 795566 }, - { url = "https://files.pythonhosted.org/packages/d9/dd/3d10b9e170cc16fb34cb2cef91513cf3df65f440b3366030631b2984a264/regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154", size = 868463 }, - { url = "https://files.pythonhosted.org/packages/f5/8e/935e6beff1695aa9085ff83195daccd72acc82c81793df480f34569330de/regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267", size = 854694 }, - { url = "https://files.pythonhosted.org/packages/92/12/10650181a040978b2f5720a6a74d44f841371a3d984c2083fc1752e4acf6/regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379", size = 799691 }, - { url = 
"https://files.pythonhosted.org/packages/67/90/8f37138181c9a7690e7e4cb388debbd389342db3c7381d636d2875940752/regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38", size = 274583 }, - { url = "https://files.pythonhosted.org/packages/8f/cd/867f5ec442d56beb56f5f854f40abcfc75e11d10b11fdb1869dd39c63aaf/regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de", size = 284286 }, - { url = "https://files.pythonhosted.org/packages/20/31/32c0c4610cbc070362bf1d2e4ea86d1ea29014d400a6d6c2486fcfd57766/regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801", size = 274741 }, -] - [[package]] name = "requests" version = "2.32.5" @@ -1296,67 +1187,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/46/f5af3402b579fd5e11573ce652019a67074317e18c1935cc0b4ba9b35552/secretstorage-3.5.0-py3-none-any.whl", hash = "sha256:0ce65888c0725fcb2c5bc0fdb8e5438eece02c523557ea40ce0703c266248137", size = 15554 }, ] -[[package]] -name = "tiktoken" -version = "0.12.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "regex" }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/7d/ab/4d017d0f76ec3171d469d80fc03dfbb4e48a4bcaddaa831b31d526f05edc/tiktoken-0.12.0.tar.gz", hash = "sha256:b18ba7ee2b093863978fcb14f74b3707cdc8d4d4d3836853ce7ec60772139931", size = 37806 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/89/b3/2cb7c17b6c4cf8ca983204255d3f1d95eda7213e247e6947a0ee2c747a2c/tiktoken-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3de02f5a491cfd179aec916eddb70331814bd6bf764075d39e21d5862e533970", size = 1051991 }, - { url = "https://files.pythonhosted.org/packages/27/0f/df139f1df5f6167194ee5ab24634582ba9a1b62c6b996472b0277ec80f66/tiktoken-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b6cfb6d9b7b54d20af21a912bfe63a2727d9cfa8fbda642fd8322c70340aad16", size = 995798 }, - { url = "https://files.pythonhosted.org/packages/ef/5d/26a691f28ab220d5edc09b9b787399b130f24327ef824de15e5d85ef21aa/tiktoken-0.12.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:cde24cdb1b8a08368f709124f15b36ab5524aac5fa830cc3fdce9c03d4fb8030", size = 1129865 }, - { url = "https://files.pythonhosted.org/packages/b2/94/443fab3d4e5ebecac895712abd3849b8da93b7b7dec61c7db5c9c7ebe40c/tiktoken-0.12.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6de0da39f605992649b9cfa6f84071e3f9ef2cec458d08c5feb1b6f0ff62e134", size = 1152856 }, - { url = "https://files.pythonhosted.org/packages/54/35/388f941251b2521c70dd4c5958e598ea6d2c88e28445d2fb8189eecc1dfc/tiktoken-0.12.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6faa0534e0eefbcafaccb75927a4a380463a2eaa7e26000f0173b920e98b720a", size = 1195308 }, - { url = "https://files.pythonhosted.org/packages/f8/00/c6681c7f833dd410576183715a530437a9873fa910265817081f65f9105f/tiktoken-0.12.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:82991e04fc860afb933efb63957affc7ad54f83e2216fe7d319007dab1ba5892", size = 1255697 }, - { url = "https://files.pythonhosted.org/packages/5f/d2/82e795a6a9bafa034bf26a58e68fe9a89eeaaa610d51dbeb22106ba04f0a/tiktoken-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:6fb2995b487c2e31acf0a9e17647e3b242235a20832642bb7a9d1a181c0c1bb1", size = 879375 }, - { url = 
"https://files.pythonhosted.org/packages/de/46/21ea696b21f1d6d1efec8639c204bdf20fde8bafb351e1355c72c5d7de52/tiktoken-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e227c7f96925003487c33b1b32265fad2fbcec2b7cf4817afb76d416f40f6bb", size = 1051565 }, - { url = "https://files.pythonhosted.org/packages/c9/d9/35c5d2d9e22bb2a5f74ba48266fb56c63d76ae6f66e02feb628671c0283e/tiktoken-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c06cf0fcc24c2cb2adb5e185c7082a82cba29c17575e828518c2f11a01f445aa", size = 995284 }, - { url = "https://files.pythonhosted.org/packages/01/84/961106c37b8e49b9fdcf33fe007bb3a8fdcc380c528b20cc7fbba80578b8/tiktoken-0.12.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:f18f249b041851954217e9fd8e5c00b024ab2315ffda5ed77665a05fa91f42dc", size = 1129201 }, - { url = "https://files.pythonhosted.org/packages/6a/d0/3d9275198e067f8b65076a68894bb52fd253875f3644f0a321a720277b8a/tiktoken-0.12.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:47a5bc270b8c3db00bb46ece01ef34ad050e364b51d406b6f9730b64ac28eded", size = 1152444 }, - { url = "https://files.pythonhosted.org/packages/78/db/a58e09687c1698a7c592e1038e01c206569b86a0377828d51635561f8ebf/tiktoken-0.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:508fa71810c0efdcd1b898fda574889ee62852989f7c1667414736bcb2b9a4bd", size = 1195080 }, - { url = "https://files.pythonhosted.org/packages/9e/1b/a9e4d2bf91d515c0f74afc526fd773a812232dd6cda33ebea7f531202325/tiktoken-0.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1af81a6c44f008cba48494089dd98cccb8b313f55e961a52f5b222d1e507967", size = 1255240 }, - { url = "https://files.pythonhosted.org/packages/9d/15/963819345f1b1fb0809070a79e9dd96938d4ca41297367d471733e79c76c/tiktoken-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:3e68e3e593637b53e56f7237be560f7a394451cb8c11079755e80ae64b9e6def", size = 879422 }, - { url = "https://files.pythonhosted.org/packages/a4/85/be65d39d6b647c79800fd9d29241d081d4eeb06271f383bb87200d74cf76/tiktoken-0.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b97f74aca0d78a1ff21b8cd9e9925714c15a9236d6ceacf5c7327c117e6e21e8", size = 1050728 }, - { url = "https://files.pythonhosted.org/packages/4a/42/6573e9129bc55c9bf7300b3a35bef2c6b9117018acca0dc760ac2d93dffe/tiktoken-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2b90f5ad190a4bb7c3eb30c5fa32e1e182ca1ca79f05e49b448438c3e225a49b", size = 994049 }, - { url = "https://files.pythonhosted.org/packages/66/c5/ed88504d2f4a5fd6856990b230b56d85a777feab84e6129af0822f5d0f70/tiktoken-0.12.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:65b26c7a780e2139e73acc193e5c63ac754021f160df919add909c1492c0fb37", size = 1129008 }, - { url = "https://files.pythonhosted.org/packages/f4/90/3dae6cc5436137ebd38944d396b5849e167896fc2073da643a49f372dc4f/tiktoken-0.12.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:edde1ec917dfd21c1f2f8046b86348b0f54a2c0547f68149d8600859598769ad", size = 1152665 }, - { url = "https://files.pythonhosted.org/packages/a3/fe/26df24ce53ffde419a42f5f53d755b995c9318908288c17ec3f3448313a3/tiktoken-0.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:35a2f8ddd3824608b3d650a000c1ef71f730d0c56486845705a8248da00f9fe5", size = 1194230 }, - { url = "https://files.pythonhosted.org/packages/20/cc/b064cae1a0e9fac84b0d2c46b89f4e57051a5f41324e385d10225a984c24/tiktoken-0.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83d16643edb7fa2c99eff2ab7733508aae1eebb03d5dfc46f5565862810f24e3", size = 1254688 }, - { url = 
"https://files.pythonhosted.org/packages/81/10/b8523105c590c5b8349f2587e2fdfe51a69544bd5a76295fc20f2374f470/tiktoken-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffc5288f34a8bc02e1ea7047b8d041104791d2ddbf42d1e5fa07822cbffe16bd", size = 878694 }, - { url = "https://files.pythonhosted.org/packages/00/61/441588ee21e6b5cdf59d6870f86beb9789e532ee9718c251b391b70c68d6/tiktoken-0.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:775c2c55de2310cc1bc9a3ad8826761cbdc87770e586fd7b6da7d4589e13dab3", size = 1050802 }, - { url = "https://files.pythonhosted.org/packages/1f/05/dcf94486d5c5c8d34496abe271ac76c5b785507c8eae71b3708f1ad9b45a/tiktoken-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a01b12f69052fbe4b080a2cfb867c4de12c704b56178edf1d1d7b273561db160", size = 993995 }, - { url = "https://files.pythonhosted.org/packages/a0/70/5163fe5359b943f8db9946b62f19be2305de8c3d78a16f629d4165e2f40e/tiktoken-0.12.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:01d99484dc93b129cd0964f9d34eee953f2737301f18b3c7257bf368d7615baa", size = 1128948 }, - { url = "https://files.pythonhosted.org/packages/0c/da/c028aa0babf77315e1cef357d4d768800c5f8a6de04d0eac0f377cb619fa/tiktoken-0.12.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:4a1a4fcd021f022bfc81904a911d3df0f6543b9e7627b51411da75ff2fe7a1be", size = 1151986 }, - { url = "https://files.pythonhosted.org/packages/a0/5a/886b108b766aa53e295f7216b509be95eb7d60b166049ce2c58416b25f2a/tiktoken-0.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:981a81e39812d57031efdc9ec59fa32b2a5a5524d20d4776574c4b4bd2e9014a", size = 1194222 }, - { url = "https://files.pythonhosted.org/packages/f4/f8/4db272048397636ac7a078d22773dd2795b1becee7bc4922fe6207288d57/tiktoken-0.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9baf52f84a3f42eef3ff4e754a0db79a13a27921b457ca9832cf944c6be4f8f3", size = 1255097 }, - { url = "https://files.pythonhosted.org/packages/8e/32/45d02e2e0ea2be3a9ed22afc47d93741247e75018aac967b713b2941f8ea/tiktoken-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:b8a0cd0c789a61f31bf44851defbd609e8dd1e2c8589c614cc1060940ef1f697", size = 879117 }, - { url = "https://files.pythonhosted.org/packages/ce/76/994fc868f88e016e6d05b0da5ac24582a14c47893f4474c3e9744283f1d5/tiktoken-0.12.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d5f89ea5680066b68bcb797ae85219c72916c922ef0fcdd3480c7d2315ffff16", size = 1050309 }, - { url = "https://files.pythonhosted.org/packages/f6/b8/57ef1456504c43a849821920d582a738a461b76a047f352f18c0b26c6516/tiktoken-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b4e7ed1c6a7a8a60a3230965bdedba8cc58f68926b835e519341413370e0399a", size = 993712 }, - { url = "https://files.pythonhosted.org/packages/72/90/13da56f664286ffbae9dbcfadcc625439142675845baa62715e49b87b68b/tiktoken-0.12.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:fc530a28591a2d74bce821d10b418b26a094bf33839e69042a6e86ddb7a7fb27", size = 1128725 }, - { url = "https://files.pythonhosted.org/packages/05/df/4f80030d44682235bdaecd7346c90f67ae87ec8f3df4a3442cb53834f7e4/tiktoken-0.12.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:06a9f4f49884139013b138920a4c393aa6556b2f8f536345f11819389c703ebb", size = 1151875 }, - { url = "https://files.pythonhosted.org/packages/22/1f/ae535223a8c4ef4c0c1192e3f9b82da660be9eb66b9279e95c99288e9dab/tiktoken-0.12.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:04f0e6a985d95913cabc96a741c5ffec525a2c72e9df086ff17ebe35985c800e", size = 1194451 }, - { url = 
"https://files.pythonhosted.org/packages/78/a7/f8ead382fce0243cb625c4f266e66c27f65ae65ee9e77f59ea1653b6d730/tiktoken-0.12.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0ee8f9ae00c41770b5f9b0bb1235474768884ae157de3beb5439ca0fd70f3e25", size = 1253794 }, - { url = "https://files.pythonhosted.org/packages/93/e0/6cc82a562bc6365785a3ff0af27a2a092d57c47d7a81d9e2295d8c36f011/tiktoken-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:dc2dd125a62cb2b3d858484d6c614d136b5b848976794edfb63688d539b8b93f", size = 878777 }, - { url = "https://files.pythonhosted.org/packages/72/05/3abc1db5d2c9aadc4d2c76fa5640134e475e58d9fbb82b5c535dc0de9b01/tiktoken-0.12.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a90388128df3b3abeb2bfd1895b0681412a8d7dc644142519e6f0a97c2111646", size = 1050188 }, - { url = "https://files.pythonhosted.org/packages/e3/7b/50c2f060412202d6c95f32b20755c7a6273543b125c0985d6fa9465105af/tiktoken-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:da900aa0ad52247d8794e307d6446bd3cdea8e192769b56276695d34d2c9aa88", size = 993978 }, - { url = "https://files.pythonhosted.org/packages/14/27/bf795595a2b897e271771cd31cb847d479073497344c637966bdf2853da1/tiktoken-0.12.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:285ba9d73ea0d6171e7f9407039a290ca77efcdb026be7769dccc01d2c8d7fff", size = 1129271 }, - { url = "https://files.pythonhosted.org/packages/f5/de/9341a6d7a8f1b448573bbf3425fa57669ac58258a667eb48a25dfe916d70/tiktoken-0.12.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:d186a5c60c6a0213f04a7a802264083dea1bbde92a2d4c7069e1a56630aef830", size = 1151216 }, - { url = "https://files.pythonhosted.org/packages/75/0d/881866647b8d1be4d67cb24e50d0c26f9f807f994aa1510cb9ba2fe5f612/tiktoken-0.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:604831189bd05480f2b885ecd2d1986dc7686f609de48208ebbbddeea071fc0b", size = 1194860 }, - { url = "https://files.pythonhosted.org/packages/b3/1e/b651ec3059474dab649b8d5b69f5c65cd8fcd8918568c1935bd4136c9392/tiktoken-0.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8f317e8530bb3a222547b85a58583238c8f74fd7a7408305f9f63246d1a0958b", size = 1254567 }, - { url = "https://files.pythonhosted.org/packages/80/57/ce64fd16ac390fafde001268c364d559447ba09b509181b2808622420eec/tiktoken-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:399c3dd672a6406719d84442299a490420b458c44d3ae65516302a99675888f3", size = 921067 }, - { url = "https://files.pythonhosted.org/packages/ac/a4/72eed53e8976a099539cdd5eb36f241987212c29629d0a52c305173e0a68/tiktoken-0.12.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2c714c72bc00a38ca969dae79e8266ddec999c7ceccd603cc4f0d04ccd76365", size = 1050473 }, - { url = "https://files.pythonhosted.org/packages/e6/d7/0110b8f54c008466b19672c615f2168896b83706a6611ba6e47313dbc6e9/tiktoken-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cbb9a3ba275165a2cb0f9a83f5d7025afe6b9d0ab01a22b50f0e74fee2ad253e", size = 993855 }, - { url = "https://files.pythonhosted.org/packages/5f/77/4f268c41a3957c418b084dd576ea2fad2e95da0d8e1ab705372892c2ca22/tiktoken-0.12.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:dfdfaa5ffff8993a3af94d1125870b1d27aed7cb97aa7eb8c1cefdbc87dbee63", size = 1129022 }, - { url = "https://files.pythonhosted.org/packages/4e/2b/fc46c90fe5028bd094cd6ee25a7db321cb91d45dc87531e2bdbb26b4867a/tiktoken-0.12.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:584c3ad3d0c74f5269906eb8a659c8bfc6144a52895d9261cdaf90a0ae5f4de0", size = 1150736 }, - { url = 
"https://files.pythonhosted.org/packages/28/c0/3c7a39ff68022ddfd7d93f3337ad90389a342f761c4d71de99a3ccc57857/tiktoken-0.12.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:54c891b416a0e36b8e2045b12b33dd66fb34a4fe7965565f1b482da50da3e86a", size = 1194908 }, - { url = "https://files.pythonhosted.org/packages/ab/0d/c1ad6f4016a3968c048545f5d9b8ffebf577774b2ede3e2e352553b685fe/tiktoken-0.12.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5edb8743b88d5be814b1a8a8854494719080c28faaa1ccbef02e87354fe71ef0", size = 1253706 }, - { url = "https://files.pythonhosted.org/packages/af/df/c7891ef9d2712ad774777271d39fdef63941ffba0a9d59b7ad1fd2765e57/tiktoken-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:f61c0aea5565ac82e2ec50a05e02a6c44734e91b51c10510b084ea1b8e633a71", size = 920667 }, -] - [[package]] name = "tomli" version = "2.3.0"