
Commit 3925071

langchain[patch], templates[patch]: fix multi query retriever, web research retriever (langchain-ai#17434)
Fixes langchain-ai#17352
1 parent c0ce932 commit 3925071


4 files changed, +20 -64 lines changed


libs/langchain/langchain/retrievers/multi_query.py

Lines changed: 11 additions & 22 deletions
@@ -1,39 +1,28 @@
 import asyncio
 import logging
-from typing import List, Sequence
+from typing import List, Optional, Sequence
 
 from langchain_core.callbacks import (
     AsyncCallbackManagerForRetrieverRun,
     CallbackManagerForRetrieverRun,
 )
 from langchain_core.documents import Document
-from langchain_core.language_models import BaseLLM
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts.prompt import PromptTemplate
-from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.retrievers import BaseRetriever
 
 from langchain.chains.llm import LLMChain
-from langchain.output_parsers.pydantic import PydanticOutputParser
 
 logger = logging.getLogger(__name__)
 
 
-class LineList(BaseModel):
-    """List of lines."""
-
-    lines: List[str] = Field(description="Lines of text")
-    """List of lines."""
-
-
-class LineListOutputParser(PydanticOutputParser):
+class LineListOutputParser(BaseOutputParser[List[str]]):
     """Output parser for a list of lines."""
 
-    def __init__(self) -> None:
-        super().__init__(pydantic_object=LineList)
-
-    def parse(self, text: str) -> LineList:
+    def parse(self, text: str) -> List[str]:
         lines = text.strip().split("\n")
-        return LineList(lines=lines)
+        return lines
 
 
 # Default prompt
@@ -63,16 +52,17 @@ class MultiQueryRetriever(BaseRetriever):
     llm_chain: LLMChain
     verbose: bool = True
     parser_key: str = "lines"
+    """DEPRECATED. parser_key is no longer used and should not be specified."""
     include_original: bool = False
     """Whether to include the original query in the list of generated queries."""
 
     @classmethod
     def from_llm(
         cls,
         retriever: BaseRetriever,
-        llm: BaseLLM,
+        llm: BaseLanguageModel,
         prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
-        parser_key: str = "lines",
+        parser_key: Optional[str] = None,
         include_original: bool = False,
     ) -> "MultiQueryRetriever":
         """Initialize from llm using default template.
@@ -91,7 +81,6 @@ def from_llm(
         return cls(
             retriever=retriever,
             llm_chain=llm_chain,
-            parser_key=parser_key,
            include_original=include_original,
        )
 
@@ -129,7 +118,7 @@ async def agenerate_queries(
         response = await self.llm_chain.acall(
             inputs={"question": question}, callbacks=run_manager.get_child()
         )
-        lines = getattr(response["text"], self.parser_key, [])
+        lines = response["text"]
         if self.verbose:
             logger.info(f"Generated queries: {lines}")
         return lines
@@ -189,7 +178,7 @@ def generate_queries(
         response = self.llm_chain(
             {"question": question}, callbacks=run_manager.get_child()
         )
-        lines = getattr(response["text"], self.parser_key, [])
+        lines = response["text"]
         if self.verbose:
             logger.info(f"Generated queries: {lines}")
         return lines
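
With this change, LineListOutputParser.parse returns a plain List[str] instead of a Pydantic LineList object. A minimal sketch of the new behavior; the sample text is illustrative, not from the commit:

# Assumes langchain at this commit: parse() yields a list of strings,
# so there is no .lines attribute to unpack anymore.
from langchain.retrievers.multi_query import LineListOutputParser

parser = LineListOutputParser()
queries = parser.parse("query one\nquery two\nquery three")
assert queries == ["query one", "query two", "query three"]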

libs/langchain/langchain/retrievers/web_research.py

Lines changed: 5 additions & 14 deletions
@@ -12,14 +12,14 @@
 )
 from langchain_core.documents import Document
 from langchain_core.language_models import BaseLLM
+from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts import BasePromptTemplate, PromptTemplate
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.retrievers import BaseRetriever
 from langchain_core.vectorstores import VectorStore
 
 from langchain.chains import LLMChain
 from langchain.chains.prompt_selector import ConditionalPromptSelector
-from langchain.output_parsers.pydantic import PydanticOutputParser
 from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
 
 logger = logging.getLogger(__name__)
@@ -50,21 +50,12 @@ class SearchQueries(BaseModel):
     )
 
 
-class LineList(BaseModel):
-    """List of questions."""
-
-    lines: List[str] = Field(description="Questions")
-
-
-class QuestionListOutputParser(PydanticOutputParser):
+class QuestionListOutputParser(BaseOutputParser[List[str]]):
     """Output parser for a list of numbered questions."""
 
-    def __init__(self) -> None:
-        super().__init__(pydantic_object=LineList)
-
-    def parse(self, text: str) -> LineList:
+    def parse(self, text: str) -> List[str]:
         lines = re.findall(r"\d+\..*?(?:\n|$)", text)
-        return LineList(lines=lines)
+        return lines
 
 
 class WebResearchRetriever(BaseRetriever):
@@ -176,7 +167,7 @@ def _get_relevant_documents(
         logger.info("Generating questions for Google Search ...")
         result = self.llm_chain({"question": query})
         logger.info(f"Questions for Google Search (raw): {result}")
-        questions = getattr(result["text"], "lines", [])
+        questions = result["text"]
         logger.info(f"Questions for Google Search: {questions}")
 
         # Get urls
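
QuestionListOutputParser.parse likewise hands back the numbered questions as a plain list. A small sketch with made-up input; note that the findall pattern keeps each question's trailing newline, which the unit test below also expects:

# Illustrative input, assuming langchain at this commit.
from langchain.retrievers.web_research import QuestionListOutputParser

parser = QuestionListOutputParser()
questions = parser.parse("1. What is X?\n2. How does Y work?\n")
assert questions == ["1. What is X?\n", "2. How does Y work?\n"]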

libs/langchain/tests/unit_tests/retrievers/test_web_research.py

Lines changed: 1 addition & 1 deletion
@@ -33,4 +33,4 @@
 def test_list_output_parser(text: str, expected: List[str]) -> None:
     parser = QuestionListOutputParser()
     result = parser.parse(text)
-    assert result.lines == expected
+    assert result == expected

templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py

Lines changed: 3 additions & 27 deletions
@@ -1,7 +1,3 @@
-from typing import List
-
-from langchain.chains import LLMChain
-from langchain.output_parsers import PydanticOutputParser
 from langchain.retrievers.multi_query import MultiQueryRetriever
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.chat_models import ChatOllama, ChatOpenAI
@@ -10,7 +6,7 @@
 from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
-from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
 
 # Load
@@ -29,23 +25,6 @@
 )
 
 
-# Output parser will split the LLM result into a list of queries
-class LineList(BaseModel):
-    # "lines" is the key (attribute name) of the parsed output
-    lines: List[str] = Field(description="Lines of text")
-
-
-class LineListOutputParser(PydanticOutputParser):
-    def __init__(self) -> None:
-        super().__init__(pydantic_object=LineList)
-
-    def parse(self, text: str) -> LineList:
-        lines = text.strip().split("\n")
-        return LineList(lines=lines)
-
-
-output_parser = LineListOutputParser()
-
 QUERY_PROMPT = PromptTemplate(
     input_variables=["question"],
     template="""You are an AI language model assistant. Your task is to generate five
@@ -60,12 +39,9 @@ def parse(self, text: str) -> LineList:
 ollama_llm = "zephyr"
 llm = ChatOllama(model=ollama_llm)
 
-# Chain
-llm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)
-
 # Run
-retriever = MultiQueryRetriever(
-    retriever=vectorstore.as_retriever(), llm_chain=llm_chain, parser_key="lines"
+retriever = MultiQueryRetriever.from_llm(
+    vectorstore.as_retriever(), llm, prompt=QUERY_PROMPT
 )  # "lines" is the key (attribute name) of the parsed output
 
 # RAG prompt
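
Templates and downstream code that previously wired up their own LLMChain and output parser, as this template did, can now go through MultiQueryRetriever.from_llm. A sketch of that construction path under stated assumptions: FakeListLLM and the TinyRetriever stub below are illustrative stand-ins for a real model and vector-store retriever, not part of the template:

from typing import List

from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_community.llms.fake import FakeListLLM
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class TinyRetriever(BaseRetriever):
    """Returns one fixed document per query, just to exercise the flow."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return [Document(page_content=f"doc for: {query}")]


# The fake LLM emits newline-separated rewrites, which the built-in
# LineListOutputParser turns into a plain list of query strings.
llm = FakeListLLM(responses=["variant one\nvariant two\nvariant three"])
retriever = MultiQueryRetriever.from_llm(retriever=TinyRetriever(), llm=llm)
docs = retriever.invoke("original question")
print([d.page_content for d in docs])

In the rag-ollama-multi-query template itself, ChatOllama and the Chroma vectorstore retriever take the places of these stand-ins, as shown in the diff above.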
