Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install Hatch
run: pip install hatch
- name: Install Poetry
run: pipx install poetry
- name: Load cached venv
id: cached-hatch-dependencies
uses: actions/cache@v3
Expand Down
2 changes: 1 addition & 1 deletion embedchain/embedchain/helpers/callbacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from typing import Any, Union

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import LLMResult
from langchain_core.outputs import LLMResult

STOP_ITEM = "[END]"
"""
Expand Down
4 changes: 2 additions & 2 deletions embedchain/embedchain/llm/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from collections.abc import Generator
from typing import Any, Optional

from langchain.schema import BaseMessage as LCBaseMessage
from langchain_core.messages import BaseMessage as LCBaseMessage

from embedchain.config import BaseLlmConfig
from embedchain.config.llm.base import (
Expand Down Expand Up @@ -341,7 +341,7 @@ def _get_messages(prompt: str, system_prompt: Optional[str] = None) -> list[LCBa
:return: List of messages
:rtype: list[BaseMessage]
"""
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import HumanMessage, SystemMessage

messages = []
if system_prompt:
Expand Down
2 changes: 1 addition & 1 deletion embedchain/embedchain/llm/groq.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from typing import Any, Optional

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import HumanMessage, SystemMessage

try:
from langchain_groq import ChatGroq
Expand Down
2 changes: 1 addition & 1 deletion embedchain/embedchain/llm/jina.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import os
from typing import Optional

from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_community.chat_models import JinaChat

from embedchain.config import BaseLlmConfig
Expand Down
2 changes: 1 addition & 1 deletion embedchain/embedchain/llm/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import Any, Callable, Dict, Optional, Type, Union

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from pydantic import BaseModel
Expand Down
4 changes: 2 additions & 2 deletions embedchain/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ chromadb = "^0.5.10"
posthog = "^3.0.2"
rich = "^13.7.0"
beautifulsoup4 = "^4.12.2"
pypdf = "^5.0.0"
pypdf = "^6.0.0"
gptcache = "^0.1.43"
pysbd = "^0.3.4"
mem0ai = "^0.1.54"
Expand Down Expand Up @@ -186,4 +186,4 @@ aws = ["langchain-aws"]
[tool.poetry.group.docs.dependencies]

[tool.poetry.scripts]
ec = "embedchain.cli:cli"
ec = "embedchain.cli:cli"
2 changes: 1 addition & 1 deletion embedchain/tests/llm/test_anthrophic.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from unittest.mock import patch

import pytest
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import HumanMessage, SystemMessage

from embedchain.config import BaseLlmConfig
from embedchain.llm.anthropic import AnthropicLlm
Expand Down
2 changes: 1 addition & 1 deletion embedchain/tests/llm/test_azure_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import httpx
import pytest
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import HumanMessage, SystemMessage

from embedchain.config import BaseLlmConfig
from embedchain.llm.azure_openai import AzureOpenAILlm
Expand Down
2 changes: 1 addition & 1 deletion embedchain/tests/llm/test_vertex_ai.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from unittest.mock import MagicMock, patch

import pytest
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import HumanMessage, SystemMessage

from embedchain.config import BaseLlmConfig
from embedchain.core.db.database import database_manager
Expand Down
2 changes: 1 addition & 1 deletion embedchain/tests/loaders/test_pdf_file.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import pytest
from langchain.schema import Document
from langchain_core.documents import Document


def test_load_data(loader, mocker):
Expand Down
2 changes: 1 addition & 1 deletion examples/misc/personalized_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from langchain_tavily import TavilySearch
from langchain.schema import HumanMessage
from langchain_core.messages import HumanMessage
from datetime import datetime
import logging

Expand Down
2 changes: 1 addition & 1 deletion mem0/vector_stores/azure_ai_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ def search(self, query, vectors, limit=5, filters=None):
if filters:
filter_expression = self._build_filter_expression(filters)

vector_query = VectorizedQuery(vector=vectors, k_nearest_neighbors=limit, fields="vector")
vector_query = VectorizedQuery(vector=vectors, k_nearest_neighbors_count=limit, fields="vector")
if self.hybrid_search:
search_results = self.search_client.search(
search_text=query,
Expand Down
2 changes: 1 addition & 1 deletion mem0/vector_stores/vertex_ai_vector_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
try:
from langchain_core.documents import Document
except ImportError: # pragma: no cover - fallback for older LangChain versions
from langchain.schema import Document # type: ignore[no-redef]
from langchain_core.documents import Document # type: ignore[no-redef]

from mem0.configs.vector_stores.vertex_ai_vector_search import (
GoogleMatchingEngineConfig,
Expand Down
2 changes: 1 addition & 1 deletion tests/vector_stores/test_azure_ai_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -542,7 +542,7 @@ def test_search_basic(azure_ai_search_instance):
# Check parameters
assert len(kwargs["vector_queries"]) == 1
assert kwargs["vector_queries"][0].vector == query_vector
assert kwargs["vector_queries"][0].k_nearest_neighbors == 5
assert kwargs["vector_queries"][0].k_nearest_neighbors_count == 5
assert kwargs["vector_queries"][0].fields == "vector"
assert kwargs["filter"] is None # No filters
assert kwargs["top"] == 5
Expand Down
Loading