The latest Google genai library allows you to specify a response schema with typing hints (link):
import google.generativeai as genai

model = genai.GenerativeModel("gemini-1.5-pro-latest")
result = model.generate_content(
    "Give me a JSON list of the first 10 prime numbers.",
    generation_config=genai.GenerationConfig(
        response_mime_type="application/json", response_schema=list[int]
    ),
)
print(result)
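For reference, a typing hint like list[int] appears to be shorthand that the genai SDK expands into the underlying Schema proto. A minimal sketch of what the equivalent explicit form would look like (an assumption, not verified against the SDK source; google.ai.generativelanguage is the low-level package that google-generativeai depends on, and the type_ field name follows the proto-plus renaming of Python builtins):

import google.ai.generativelanguage as glm

# Roughly what response_schema=list[int] would expand to under the hood:
# an ARRAY schema whose items are INTEGERs.
explicit_schema = glm.Schema(
    type_=glm.Type.ARRAY,
    items=glm.Schema(type_=glm.Type.INTEGER),
)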
However, trying to do so with the wrapped models results in an error
from langchain_google_genai import ChatGoogleGenerativeAI

model = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro-latest",
)
model.invoke(
    "Give me a JSON list of the first 10 prime numbers.",
    generation_config={
        "response_mime_type": "application/json",
        "response_schema": {"numbers": list[int]},
    },
)
=====
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/proto/marshal/rules/message.py:36, in MessageRule.to_proto(self, value)
     34 try:
     35     # Try the fast path first.
---> 36     return self._descriptor(**value)
     37 except (TypeError, ValueError) as ex:
     38     # If we have a TypeError or Valueerror,
     39     # try the slow path in case the error
    (...)
     43     # See related issue: https://github.com/googleapis/python-api-core/issues/227.
     44     # - a missing key issue due to nested struct. See: b/321905145.

ValueError: Protocol message Schema has no "numbers" field.

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
Cell In[23], line 8
      1 from langchain_google_genai import ChatGoogleGenerativeAI
      4 model = ChatGoogleGenerativeAI(
      5     model="gemini-1.5-pro-latest",
      6 )
----> 8 model.invoke(
      9     "Give me a JSON list of the first 10 prime numbers.",
     10     generation_config={
     11         "response_mime_type": "application/json",
     12         "response_schema": {"numbers": list[int]},
     13     }
     14 ).content

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:286, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    275 def invoke(
    276     self,
    277     input: LanguageModelInput,
    (...)
    281     **kwargs: Any,
    282 ) -> BaseMessage:
    283     config = ensure_config(config)
    284     return cast(
    285         ChatGeneration,
--> 286         self.generate_prompt(
    287             [self._convert_input(input)],
    288             stop=stop,
    289             callbacks=config.get("callbacks"),
    290             tags=config.get("tags"),
    291             metadata=config.get("metadata"),
    292             run_name=config.get("run_name"),
    293             run_id=config.pop("run_id", None),
    294             **kwargs,
    295         ).generations[0][0],
    296     ).message

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:786, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    778 def generate_prompt(
    779     self,
    780     prompts: list[PromptValue],
    (...)
    783     **kwargs: Any,
    784 ) -> LLMResult:
    785     prompt_messages = [p.to_messages() for p in prompts]
--> 786     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:643, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    641     if run_managers:
    642         run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 643     raise e
    644 flattened_outputs = [
    645     LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
    646     for res in results
    647 ]
    648 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:633, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    630 for i, m in enumerate(messages):
    631     try:
    632         results.append(
--> 633             self._generate_with_cache(
    634                 m,
    635                 stop=stop,
    636                 run_manager=run_managers[i] if run_managers else None,
    637                 **kwargs,
    638             )
    639         )
    640     except BaseException as e:
    641         if run_managers:

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:851, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    849 else:
    850     if inspect.signature(self._generate).parameters.get("run_manager"):
--> 851         result = self._generate(
    852             messages, stop=stop, run_manager=run_manager, **kwargs
    853         )
    854     else:
    855         result = self._generate(messages, stop=stop, **kwargs)

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_google_genai/chat_models.py:935, in ChatGoogleGenerativeAI._generate(self, messages, stop, run_manager, tools, functions, safety_settings, tool_config, generation_config, cached_content, tool_choice, **kwargs)
    920 def _generate(
    921     self,
    922     messages: List[BaseMessage],
    (...)
    933     **kwargs: Any,
    934 ) -> ChatResult:
--> 935     request = self._prepare_request(
    936         messages,
    937         stop=stop,
    938         tools=tools,
    939         functions=functions,
    940         safety_settings=safety_settings,
    941         tool_config=tool_config,
    942         generation_config=generation_config,
    943         cached_content=cached_content or self.cached_content,
    944         tool_choice=tool_choice,
    945     )
    946     response: GenerateContentResponse = _chat_with_retry(
    947         request=request,
    948         **kwargs,
    949         generation_method=self.client.generate_content,
    950         metadata=self.default_metadata,
    951     )
    952     return _response_to_result(response)

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_google_genai/chat_models.py:1198, in ChatGoogleGenerativeAI._prepare_request(self, messages, stop, tools, functions, safety_settings, tool_config, tool_choice, generation_config, cached_content)
   1187 if safety_settings:
   1188     formatted_safety_settings = [
   1189         SafetySetting(category=c, threshold=t)
   1190         for c, t in safety_settings.items()
   1191     ]
   1192 request = GenerateContentRequest(
   1193     model=self.model,
   1194     contents=history,
   1195     tools=formatted_tools,
   1196     tool_config=formatted_tool_config,
   1197     safety_settings=formatted_safety_settings,
-> 1198     generation_config=self._prepare_params(
   1199         stop, generation_config=generation_config
   1200     ),
   1201     cached_content=cached_content,
   1202 )
   1203 if system_instruction:
   1204     request.system_instruction = system_instruction

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/langchain_google_genai/chat_models.py:918, in ChatGoogleGenerativeAI._prepare_params(self, stop, generation_config)
    916 if generation_config:
    917     gen_config = {**gen_config, **generation_config}
--> 918 return GenerationConfig(**gen_config)

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/proto/message.py:728, in Message.__init__(self, mapping, ignore_unknown_fields, **kwargs)
    722     continue
    724     raise ValueError(
    725         "Unknown field for {}: {}".format(self.__class__.__name__, key)
    726     )
--> 728 pb_value = marshal.to_proto(pb_type, value)
    730 if pb_value is not None:
    731     params[key] = pb_value

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/proto/marshal/marshal.py:235, in BaseMarshal.to_proto(self, proto_type, value, strict)
    232     recursive_type = type(proto_type().value)
    233     return {k: self.to_proto(recursive_type, v) for k, v in value.items()}
--> 235 pb_value = self.get_rule(proto_type=proto_type).to_proto(value)
    237 # Sanity check: If we are in strict mode, did we get the value we want?
    238 if strict and not isinstance(pb_value, proto_type):

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/proto/marshal/rules/message.py:45, in MessageRule.to_proto(self, value)
     36     return self._descriptor(**value)
     37 except (TypeError, ValueError) as ex:
     38     # If we have a TypeError or Valueerror,
     39     # try the slow path in case the error
    (...)
     43     # See related issue: https://github.com/googleapis/python-api-core/issues/227.
     44     # - a missing key issue due to nested struct. See: b/321905145.
---> 45     return self._wrapper(value)._pb
     46 return value

File ~/miniconda3/envs/axiom/lib/python3.11/site-packages/proto/message.py:724, in Message.__init__(self, mapping, ignore_unknown_fields, **kwargs)
    721 if ignore_unknown_fields:
    722     continue
--> 724 raise ValueError(
    725     "Unknown field for {}: {}".format(self.__class__.__name__, key)
    726 )
    728 pb_value = marshal.to_proto(pb_type, value)
    730 if pb_value is not None:

ValueError: Unknown field for Schema: numbers
It seems that the GenerationConfig used in this library and the genai.GenerationConfig in the first snippet are different classes: the wrapper builds the low-level google.ai.generativelanguage proto message, which does not understand typing hints, resulting in the error above.
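If that is the case, a possible workaround until the wrapper understands typing hints would be to pass a ready-made Schema proto instead of {"numbers": list[int]}, since the traceback shows the config is marshalled straight into the proto message and did reach the Schema field. A minimal, untested sketch (glm.Schema and glm.Type come from google.ai.generativelanguage; whether ChatGoogleGenerativeAI forwards this correctly is exactly what this issue is about):

import google.ai.generativelanguage as glm
from langchain_google_genai import ChatGoogleGenerativeAI

model = ChatGoogleGenerativeAI(model="gemini-1.5-pro-latest")

# Sketch of a possible workaround: hand the wrapper a Schema proto directly,
# so the proto marshaller receives a value it already understands.
result = model.invoke(
    "Give me a JSON list of the first 10 prime numbers.",
    generation_config={
        "response_mime_type": "application/json",
        "response_schema": glm.Schema(
            type_=glm.Type.ARRAY,
            items=glm.Schema(type_=glm.Type.INTEGER),
        ),
    },
)
print(result.content)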