diff --git a/lib/crewai/src/crewai/knowledge/source/base_knowledge_source.py b/lib/crewai/src/crewai/knowledge/source/base_knowledge_source.py index b62dd0f04b..5862a44743 100644 --- a/lib/crewai/src/crewai/knowledge/source/base_knowledge_source.py +++ b/lib/crewai/src/crewai/knowledge/source/base_knowledge_source.py @@ -12,7 +12,7 @@ class BaseKnowledgeSource(BaseModel, ABC): chunk_size: int = 4000 chunk_overlap: int = 200 - chunks: list[str] = Field(default_factory=list) + chunks: list[dict[str, Any]] = Field(default_factory=list) chunk_embeddings: list[np.ndarray] = Field(default_factory=list) model_config = ConfigDict(arbitrary_types_allowed=True) @@ -39,7 +39,7 @@ def _chunk_text(self, text: str) -> list[str]: for i in range(0, len(text), self.chunk_size - self.chunk_overlap) ] - def _save_documents(self): + def _save_documents(self) -> None: """ Save the documents to the storage. This method should be called after the chunks and embeddings are generated. diff --git a/lib/crewai/src/crewai/knowledge/source/crew_docling_source.py b/lib/crewai/src/crewai/knowledge/source/crew_docling_source.py index 9061fe3fd9..e0a4954860 100644 --- a/lib/crewai/src/crewai/knowledge/source/crew_docling_source.py +++ b/lib/crewai/src/crewai/knowledge/source/crew_docling_source.py @@ -1,42 +1,53 @@ from __future__ import annotations from collections.abc import Iterator +import importlib from pathlib import Path +from typing import Any from urllib.parse import urlparse +# --- third-party/optional imports (OK to keep in try/except) --- try: from docling.datamodel.base_models import ( # type: ignore[import-not-found] InputFormat, ) - from docling.document_converter import ( # type: ignore[import-not-found] - DocumentConverter, - ) - from docling.exceptions import ConversionError # type: ignore[import-not-found] from docling_core.transforms.chunker.hierarchical_chunker import ( # type: ignore[import-not-found] HierarchicalChunker, ) - from docling_core.types.doc.document import ( # type: ignore[import-not-found] - DoclingDocument, - ) DOCLING_AVAILABLE = True except ImportError: DOCLING_AVAILABLE = False -from pydantic import Field +# Ensure the converter module is present too; otherwise the flag is misleading. +if DOCLING_AVAILABLE: + import importlib.util as _ilu + + if ( + _ilu.find_spec("docling.document_converter") is None + or _ilu.find_spec("docling.exceptions") is None + ): + DOCLING_AVAILABLE = False + +# --- regular imports must stay together, before any non-import statements --- +from pydantic import Field, PrivateAttr from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource from crewai.utilities.constants import KNOWLEDGE_DIRECTORY from crewai.utilities.logger import Logger +# Safe default; will be overwritten at runtime if docling is present +DoclingConversionError: type[BaseException] | None = None + + class CrewDoclingSource(BaseKnowledgeSource): """Default Source class for converting documents to markdown or json This will auto support PDF, DOCX, and TXT, XLSX, Images, and HTML files without any additional dependencies and follows the docling package as the source of truth. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any) -> None: if not DOCLING_AVAILABLE: raise ImportError( "The docling package is required to use CrewDoclingSource. 
" @@ -48,11 +59,57 @@ def __init__(self, *args, **kwargs): file_path: list[Path | str] | None = Field(default=None) file_paths: list[Path | str] = Field(default_factory=list) - chunks: list[str] = Field(default_factory=list) + chunks: list[dict[str, Any]] = Field(default_factory=list) safe_file_paths: list[Path | str] = Field(default_factory=list) - content: list[DoclingDocument] = Field(default_factory=list) - document_converter: DocumentConverter = Field( - default_factory=lambda: DocumentConverter( + content: list[Any] = Field(default_factory=list) + _aligned_paths: list[Path | str] = PrivateAttr(default_factory=list) + document_converter: Any = Field(default=None) + + def model_post_init(self, __context: Any) -> None: + if self.file_path: + self._logger.log( + "warning", + "The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.", + color="yellow", + ) + self.file_paths = self.file_path + + self.safe_file_paths = self.validate_content() + + # Import docling pieces dynamically to avoid mypy missing-import issues. + try: + docling_mod = importlib.import_module("docling.document_converter") + except Exception as e: + raise ImportError( + "docling is partially installed: 'docling.document_converter' not found. " + "Please install/upgrade docling: `uv add docling`." + ) from e + document_converter_cls = docling_mod.DocumentConverter + + # Resolve ConversionError dynamically (no static import) + try: + exc_mod = importlib.import_module("docling.exceptions") + exc_cls = getattr(exc_mod, "ConversionError", None) + if isinstance(exc_cls, type) and issubclass(exc_cls, BaseException): + global DoclingConversionError + DoclingConversionError = exc_cls + else: + self._logger.log( + "warning", + "docling.exceptions.ConversionError not found or invalid; using generic handling.", + color="yellow", + ) + DoclingConversionError = None + except Exception as err: + # Log instead of bare `pass` to satisfy ruff S110 + self._logger.log( + "warning", + f"docling.exceptions not available ({err!s}); using generic handling.", + color="yellow", + ) + DoclingConversionError = None + + self.document_converter = document_converter_cls( allowed_formats=[ InputFormat.MD, InputFormat.ASCIIDOC, @@ -62,48 +119,94 @@ def __init__(self, *args, **kwargs): InputFormat.IMAGE, InputFormat.XLSX, InputFormat.PPTX, + InputFormat.CSV, ] ) - ) - - def model_post_init(self, _) -> None: - if self.file_path: - self._logger.log( - "warning", - "The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.", - color="yellow", - ) - self.file_paths = self.file_path - self.safe_file_paths = self.validate_content() self.content = self._load_content() - def _load_content(self) -> list[DoclingDocument]: + def _load_content(self) -> list[Any]: try: return self._convert_source_to_docling_documents() - except ConversionError as e: - self._logger.log( - "error", - f"Error loading content: {e}. Supported formats: {self.document_converter.allowed_formats}", - "red", - ) - raise e except Exception as e: - self._logger.log("error", f"Error loading content: {e}") - raise e + if DoclingConversionError is not None and isinstance( + e, DoclingConversionError + ): + self._logger.log( + "error", + f"Error loading content: {e}. 
Supported formats: {self.document_converter.allowed_formats}", + "red", + ) + else: + self._logger.log("error", f"Error loading content: {e}") + raise def add(self) -> None: - if self.content is None: + """Convert each document to chunks, attach filepath metadata, and persist.""" + if not self.content: return - for doc in self.content: - new_chunks_iterable = self._chunk_doc(doc) - self.chunks.extend(list(new_chunks_iterable)) - self._save_documents() - def _convert_source_to_docling_documents(self) -> list[DoclingDocument]: - conv_results_iter = self.document_converter.convert_all(self.safe_file_paths) - return [result.document for result in conv_results_iter] + for filepath, doc in zip(self._aligned_paths, self.content, strict=True): + chunk_idx = 0 + for chunk in self._chunk_doc(doc): + self.chunks.append( + { + "content": chunk, + "metadata": { + "filepath": str(filepath), + "chunk_index": chunk_idx, + "source_type": "docling", + }, + } + ) + chunk_idx += 1 - def _chunk_doc(self, doc: DoclingDocument) -> Iterator[str]: + self._save_documents() + + def _convert_one(self, fp: Path | str) -> tuple[Any, Path | str] | None: + """Convert a single file; on failure, log and return None.""" + try: + result = self.document_converter.convert(fp) + return result.document, fp + except Exception as e: + if DoclingConversionError is not None and isinstance( + e, DoclingConversionError + ): + self._logger.log( + "warning", + f"Skipping {fp!s}: conversion failed with {e!s}", + color="yellow", + ) + else: + self._logger.log( + "warning", + f"Skipping {fp!s}: unexpected error during conversion: {e!s}", + color="yellow", + ) + return None + + def _convert_source_to_docling_documents(self) -> list[Any]: + """ + Convert files one-by-one to preserve (filepath, document) alignment. + + Any file that fails conversion is skipped (with a warning). For all successful + conversions, we maintain a parallel list of source paths so the add() step can + attach correct per-chunk filepath metadata without relying on zip truncation. + """ + aligned_docs: list[Any] = [] + aligned_paths: list[Path | str] = [] + + for fp in self.safe_file_paths: + item = self._convert_one(fp) + if item is None: + continue + doc, aligned_fp = item + aligned_docs.append(doc) + aligned_paths.append(aligned_fp) + + self._aligned_paths = aligned_paths + return aligned_docs + + def _chunk_doc(self, doc: Any) -> Iterator[str]: chunker = HierarchicalChunker() for chunk in chunker.chunk(doc): yield chunk.text @@ -127,7 +230,6 @@ def validate_content(self) -> list[Path | str]: else: raise FileNotFoundError(f"File not found: {local_path}") else: - # this is an instance of Path processed_paths.append(path) return processed_paths @@ -138,7 +240,7 @@ def _validate_url(self, url: str) -> bool: [ result.scheme in ("http", "https"), result.netloc, - len(result.netloc.split(".")) >= 2, # Ensure domain has TLD + len(result.netloc.split(".")) >= 2, ] ) except Exception: diff --git a/lib/crewai/src/crewai/knowledge/source/csv_knowledge_source.py b/lib/crewai/src/crewai/knowledge/source/csv_knowledge_source.py index dc7401598b..c40bc2063c 100644 --- a/lib/crewai/src/crewai/knowledge/source/csv_knowledge_source.py +++ b/lib/crewai/src/crewai/knowledge/source/csv_knowledge_source.py @@ -21,15 +21,25 @@ def load_content(self) -> dict[Path, str]: def add(self) -> None: """ - Add CSV file content to the knowledge source, chunk it, compute embeddings, - and save the embeddings. 
+ Add CSV file content to the knowledge source, chunk it per file, + attach filepath metadata, and persist via the configured storage. """ - content_str = ( - str(self.content) if isinstance(self.content, dict) else self.content - ) - new_chunks = self._chunk_text(content_str) - self.chunks.extend(new_chunks) - self._save_documents() + for filepath, text in self.content.items(): + chunk_idx = 0 + content_str = str(text) + for chunk in self._chunk_text(content_str): + self.chunks.append( + { + "content": chunk, + "metadata": { + "filepath": str(filepath), + "chunk_index": chunk_idx, + "source_type": "csv", + }, + } + ) + chunk_idx += 1 + self._save_documents() # type: ignore[no-untyped-call] def _chunk_text(self, text: str) -> list[str]: """Utility method to split text into chunks.""" diff --git a/lib/crewai/src/crewai/knowledge/source/excel_knowledge_source.py b/lib/crewai/src/crewai/knowledge/source/excel_knowledge_source.py index 3c33e88031..30af365be0 100644 --- a/lib/crewai/src/crewai/knowledge/source/excel_knowledge_source.py +++ b/lib/crewai/src/crewai/knowledge/source/excel_knowledge_source.py @@ -1,4 +1,5 @@ from pathlib import Path +from typing import Any from pydantic import Field, field_validator @@ -21,12 +22,12 @@ class ExcelKnowledgeSource(BaseKnowledgeSource): file_paths: Path | list[Path] | str | list[str] | None = Field( default_factory=list, description="The path to the file" ) - chunks: list[str] = Field(default_factory=list) + chunks: list[dict[str, Any]] = Field(default_factory=list) content: dict[Path, dict[str, str]] = Field(default_factory=dict) safe_file_paths: list[Path] = Field(default_factory=list) @field_validator("file_path", "file_paths", mode="before") - def validate_file_path(cls, v, info): # noqa: N805 + def validate_file_path(cls, v: Any, info: Any) -> Any: # noqa: N805 """Validate that at least one of file_path or file_paths is provided.""" # Single check if both are None, O(1) instead of nested conditions if ( @@ -69,7 +70,7 @@ def _process_file_paths(self) -> list[Path]: return [self.convert_to_path(path) for path in path_list] - def validate_content(self): + def validate_content(self) -> None: """Validate the paths.""" for path in self.safe_file_paths: if not path.exists(): @@ -86,7 +87,7 @@ def validate_content(self): color="red", ) - def model_post_init(self, _) -> None: + def model_post_init(self, __context: Any) -> None: if self.file_path: self._logger.log( "warning", @@ -128,10 +129,10 @@ def convert_to_path(self, path: Path | str) -> Path: """Convert a path to a Path object.""" return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path - def _import_dependencies(self): + def _import_dependencies(self) -> Any: """Dynamically import dependencies.""" try: - import pandas as pd # type: ignore[import-untyped,import-not-found] + import pandas as pd # type: ignore[import-untyped] return pd except ImportError as e: @@ -142,21 +143,25 @@ def _import_dependencies(self): def add(self) -> None: """ - Add Excel file content to the knowledge source, chunk it, compute embeddings, - and save the embeddings. + Add Excel file content to the knowledge source, chunk it per sheet, + attach filepath & sheet metadata, and persist via the configured storage. 
""" - # Convert dictionary values to a single string if content is a dictionary - # Updated to account for .xlsx workbooks with multiple tabs/sheets - content_str = "" - for value in self.content.values(): - if isinstance(value, dict): - for sheet_value in value.values(): - content_str += str(sheet_value) + "\n" - else: - content_str += str(value) + "\n" - - new_chunks = self._chunk_text(content_str) - self.chunks.extend(new_chunks) + for filepath, sheets in self.content.items(): + for sheet_name, sheet_csv_str in sheets.items(): + chunk_idx = 0 + for chunk in self._chunk_text(sheet_csv_str): + self.chunks.append( + { + "content": chunk, + "metadata": { + "filepath": str(filepath), + "sheet_name": str(sheet_name), + "chunk_index": chunk_idx, + "source_type": "excel", + }, + } + ) + chunk_idx += 1 self._save_documents() def _chunk_text(self, text: str) -> list[str]: diff --git a/lib/crewai/src/crewai/knowledge/source/json_knowledge_source.py b/lib/crewai/src/crewai/knowledge/source/json_knowledge_source.py index 0e5c847e2f..1cda1f46d3 100644 --- a/lib/crewai/src/crewai/knowledge/source/json_knowledge_source.py +++ b/lib/crewai/src/crewai/knowledge/source/json_knowledge_source.py @@ -34,15 +34,24 @@ def _json_to_text(self, data: Any, level: int = 0) -> str: def add(self) -> None: """ - Add JSON file content to the knowledge source, chunk it, compute embeddings, - and save the embeddings. + Add JSON file content to the knowledge source, chunk it per file, + attach filepath metadata, and persist via the configured storage. """ - content_str = ( - str(self.content) if isinstance(self.content, dict) else self.content - ) - new_chunks = self._chunk_text(content_str) - self.chunks.extend(new_chunks) - self._save_documents() + for filepath, text in self.content.items(): + chunk_idx = 0 + for chunk in self._chunk_text(text): + self.chunks.append( + { + "content": chunk, + "metadata": { + "filepath": str(filepath), + "chunk_index": chunk_idx, + "source_type": "json", + }, + } + ) + chunk_idx += 1 + self._save_documents() # type: ignore[no-untyped-call] def _chunk_text(self, text: str) -> list[str]: """Utility method to split text into chunks.""" diff --git a/lib/crewai/src/crewai/knowledge/source/pdf_knowledge_source.py b/lib/crewai/src/crewai/knowledge/source/pdf_knowledge_source.py index 7fa663b920..eae5324d35 100644 --- a/lib/crewai/src/crewai/knowledge/source/pdf_knowledge_source.py +++ b/lib/crewai/src/crewai/knowledge/source/pdf_knowledge_source.py @@ -1,4 +1,5 @@ from pathlib import Path +from typing import Any from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource @@ -23,7 +24,7 @@ def load_content(self) -> dict[Path, str]: content[path] = text return content - def _import_pdfplumber(self): + def _import_pdfplumber(self) -> Any: """Dynamically import pdfplumber.""" try: import pdfplumber @@ -36,13 +37,24 @@ def _import_pdfplumber(self): def add(self) -> None: """ - Add PDF file content to the knowledge source, chunk it, compute embeddings, + Add PDF file content to the knowledge source, chunk it, add metadata, compute embeddings, and save the embeddings. 
""" - for text in self.content.values(): - new_chunks = self._chunk_text(text) - self.chunks.extend(new_chunks) - self._save_documents() + for filepath, text in self.content.items(): + chunk_idx = 0 + for chunk in self._chunk_text(text): + self.chunks.append( + { + "content": chunk, + "metadata": { + "filepath": str(filepath), + "chunk_index": chunk_idx, + "source_type": "pdf", + }, + } + ) + chunk_idx += 1 + self._save_documents() # type: ignore[no-untyped-call] def _chunk_text(self, text: str) -> list[str]: """Utility method to split text into chunks.""" diff --git a/lib/crewai/src/crewai/knowledge/source/string_knowledge_source.py b/lib/crewai/src/crewai/knowledge/source/string_knowledge_source.py index 97473d9d37..7d1f55f011 100644 --- a/lib/crewai/src/crewai/knowledge/source/string_knowledge_source.py +++ b/lib/crewai/src/crewai/knowledge/source/string_knowledge_source.py @@ -1,3 +1,5 @@ +from typing import Any + from pydantic import Field from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource @@ -9,19 +11,33 @@ class StringKnowledgeSource(BaseKnowledgeSource): content: str = Field(...) collection_name: str | None = Field(default=None) - def model_post_init(self, _): + def model_post_init(self, __context: Any) -> None: """Post-initialization method to validate content.""" self.validate_content() - def validate_content(self): + def validate_content(self) -> None: """Validate string content.""" if not isinstance(self.content, str): raise ValueError("StringKnowledgeSource only accepts string content") def add(self) -> None: - """Add string content to the knowledge source, chunk it, compute embeddings, and save them.""" - new_chunks = self._chunk_text(self.content) - self.chunks.extend(new_chunks) + """ + Add string content to the knowledge source, chunk it, + attach metadata, and persist via the configured storage. + """ + chunk_idx = 0 + for chunk in self._chunk_text(self.content): + metadata: dict[str, Any] = { + "source_type": "string", + "chunk_index": chunk_idx, + } + self.chunks.append( + { + "content": chunk, + "metadata": metadata, + } + ) + chunk_idx += 1 self._save_documents() def _chunk_text(self, text: str) -> list[str]: diff --git a/lib/crewai/src/crewai/knowledge/source/text_file_knowledge_source.py b/lib/crewai/src/crewai/knowledge/source/text_file_knowledge_source.py index 93a3e2849e..d4c4a8816a 100644 --- a/lib/crewai/src/crewai/knowledge/source/text_file_knowledge_source.py +++ b/lib/crewai/src/crewai/knowledge/source/text_file_knowledge_source.py @@ -17,13 +17,24 @@ def load_content(self) -> dict[Path, str]: def add(self) -> None: """ - Add text file content to the knowledge source, chunk it, compute embeddings, - and save the embeddings. + Add text file content to the knowledge source, chunk it per file, + attach filepath metadata, and persist via the configured storage. 
""" - for text in self.content.values(): - new_chunks = self._chunk_text(text) - self.chunks.extend(new_chunks) - self._save_documents() + for filepath, text in self.content.items(): + chunk_idx = 0 + for chunk in self._chunk_text(text): + self.chunks.append( + { + "content": chunk, + "metadata": { + "filepath": str(filepath), + "chunk_index": chunk_idx, + "source_type": "text", + }, + } + ) + chunk_idx += 1 + self._save_documents() # type: ignore[no-untyped-call] def _chunk_text(self, text: str) -> list[str]: """Utility method to split text into chunks.""" diff --git a/lib/crewai/src/crewai/knowledge/storage/knowledge_storage.py b/lib/crewai/src/crewai/knowledge/storage/knowledge_storage.py index 7eed0e0deb..f8278b0116 100644 --- a/lib/crewai/src/crewai/knowledge/storage/knowledge_storage.py +++ b/lib/crewai/src/crewai/knowledge/storage/knowledge_storage.py @@ -1,3 +1,4 @@ +from collections.abc import Mapping, Sequence import logging import traceback from typing import Any, cast @@ -16,6 +17,71 @@ from crewai.utilities.logger import Logger +def _coerce_to_records(documents: Sequence[Any]) -> list[BaseRecord]: + records: list[BaseRecord] = [] + for d in documents: + if isinstance(d, str): + records.append({"content": d}) + elif isinstance(d, Mapping): + # Only process dict-like inputs that explicitly provide "content" + if "content" not in d: + continue + + raw_content = d.get("content", "") + # Coerce to str to satisfy BaseRecord; None -> "", others -> str(value) + content_str: str = "" if raw_content is None else str(raw_content) + + metadata_raw = d.get("metadata", {}) + + # Prepare metadata in one of the two shapes allowed by BaseRecord: + # Mapping[str, str|int|float|bool] OR list[Mapping[str, str|int|float|bool]] + meta_m: Mapping[str, str | int | float | bool] | None = None + meta_l: list[Mapping[str, str | int | float | bool]] | None = None + + if isinstance(metadata_raw, Mapping): + sanitized = { + str(k): ( + v + if isinstance(v, (str, int, float, bool)) + else ("" if v is None else str(v)) + ) + for k, v in metadata_raw.items() + } + meta_m = cast(Mapping[str, str | int | float | bool], sanitized) + + elif isinstance(metadata_raw, list): + sanitized_list: list[Mapping[str, str | int | float | bool]] = [] + for m in metadata_raw: + if isinstance(m, Mapping): + sanitized_m = { + str(k): ( + v + if isinstance(v, (str, int, float, bool)) + else ("" if v is None else str(v)) + ) + for k, v in m.items() + } + sanitized_list.append( + cast(Mapping[str, str | int | float | bool], sanitized_m) + ) + meta_l = sanitized_list + + rec: BaseRecord = {"content": content_str} + if meta_m is not None: + rec["metadata"] = meta_m + elif meta_l is not None: + rec["metadata"] = meta_l + + if "doc_id" in d and isinstance(d["doc_id"], str): + rec["doc_id"] = d["doc_id"] + + records.append(rec) + else: + # Ignore unsupported shapes + continue + return records + + class KnowledgeStorage(BaseKnowledgeStorage): """ Extends Storage to handle embeddings for memory entries, improving @@ -25,8 +91,8 @@ class KnowledgeStorage(BaseKnowledgeStorage): def __init__( self, embedder: ProviderSpec - | BaseEmbeddingsProvider - | type[BaseEmbeddingsProvider] + | BaseEmbeddingsProvider[Any] + | type[BaseEmbeddingsProvider[Any]] | None = None, collection_name: str | None = None, ) -> None: @@ -98,7 +164,7 @@ def reset(self) -> None: f"Error during knowledge reset: {e!s}\n{traceback.format_exc()}" ) - def save(self, documents: list[str]) -> None: + def save(self, documents: list[Any]) -> None: try: client = 
self._get_client() collection_name = ( @@ -108,8 +174,8 @@ def save(self, documents: list[str]) -> None: ) client.get_or_create_collection(collection_name=collection_name) - rag_documents: list[BaseRecord] = [{"content": doc} for doc in documents] - + # Accept both old (list[str]) and new (list[dict]) chunk formats + rag_documents: list[BaseRecord] = _coerce_to_records(documents) client.add_documents( collection_name=collection_name, documents=rag_documents ) diff --git a/pyproject.toml b/pyproject.toml index cee6a04e0f..af85bf5cdb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ dev = [ "boto3-stubs[bedrock-runtime]>=1.40.54", "types-psycopg2>=2.9.21.20251012", "types-pymysql>=1.1.0.20250916", + "pip>=25.2", ] diff --git a/uv.lock b/uv.lock index 21a3db9a3a..dd9908df38 100644 --- a/uv.lock +++ b/uv.lock @@ -40,6 +40,7 @@ dev = [ { name = "bandit", specifier = ">=1.8.6" }, { name = "boto3-stubs", extras = ["bedrock-runtime"], specifier = ">=1.40.54" }, { name = "mypy", specifier = ">=1.18.2" }, + { name = "pip", specifier = ">=25.2" }, { name = "pre-commit", specifier = ">=4.3.0" }, { name = "pytest", specifier = ">=8.4.2" }, { name = "pytest-asyncio", specifier = ">=1.2.0" }, @@ -5250,6 +5251,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/3b/ce7a01026a7cf46e5452afa86f97a5e88ca97f562cafa76570178ab56d8d/pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5", size = 2554661, upload-time = "2024-07-01T09:48:20.293Z" }, ] +[[package]] +name = "pip" +version = "25.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" }, +] + [[package]] name = "platformdirs" version = "4.5.0"
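Reviewer sketch (not part of the patch): a minimal example of how the dict-shaped chunks produced by the updated source classes are expected to be coerced into BaseRecord entries by _coerce_to_records once this change is applied. The file name and chunk text below are hypothetical, and _coerce_to_records is a module-private helper introduced by this diff.

    # Assumes a crewai checkout with this patch applied.
    from crewai.knowledge.storage.knowledge_storage import _coerce_to_records

    chunks = [
        # Old format: bare strings are still accepted.
        "plain string chunks are still accepted",
        # New format: content plus per-chunk metadata, as built by e.g. PDFKnowledgeSource.add().
        {
            "content": "Q1 revenue grew 12% quarter over quarter.",
            "metadata": {
                "filepath": "knowledge/report.pdf",
                "chunk_index": 0,
                "source_type": "pdf",
            },
        },
    ]

    records = _coerce_to_records(chunks)
    # Expected shape:
    # [{'content': 'plain string chunks are still accepted'},
    #  {'content': 'Q1 revenue grew 12% quarter over quarter.',
    #   'metadata': {'filepath': 'knowledge/report.pdf', 'chunk_index': 0, 'source_type': 'pdf'}}]
    print(records)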