
Commit 691ff67

partners/openai: fix deprecation errors of pydantic's .dict() function (reopen langchain-ai#16629) (langchain-ai#17404)

Co-authored-by: Bagatur <baskaryan@gmail.com>

1 parent bebe401
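
The commit replaces every call to Pydantic's deprecated .dict() method with .model_dump() across the OpenAI partner package. A minimal sketch of the warning being silenced, assuming Pydantic v2 is installed (the Completion model here is invented for illustration):

    from pydantic import BaseModel

    class Completion(BaseModel):
        id: str
        choices: list

    resp = Completion(id="cmpl-123", choices=[])
    data = resp.dict()        # still works in v2, but emits PydanticDeprecatedSince20
    data = resp.model_dump()  # the v2 replacement applied throughout this commit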

File tree: 4 files changed (+19 lines, -15 lines)


libs/partners/openai/langchain_openai/chat_models/azure.py

Lines changed: 5 additions & 3 deletions

@@ -7,7 +7,7 @@
 
 import openai
 from langchain_core.outputs import ChatResult
-from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
+from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
 from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
 
 from langchain_openai.chat_models.base import ChatOpenAI
@@ -209,9 +209,11 @@ def lc_attributes(self) -> Dict[str, Any]:
             "openai_api_version": self.openai_api_version,
         }
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             if res.get("finish_reason", None) == "content_filter":
                 raise ValueError(

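Note the annotation change above: the now-unused BaseModel import from langchain_core.pydantic_v1 (a Pydantic v1 compatibility shim) is dropped, and the response is typed as openai.BaseModel, since the openai v1 client actually returns Pydantic v2 model objects. A hypothetical helper (not part of the commit) condensing the defensive pattern each call site now uses:

    from typing import Union

    import openai

    def _to_dict(response: Union[dict, openai.BaseModel]) -> dict:
        # Plain dicts pass through untouched; Pydantic v2 models from the
        # v1 openai client are dumped without the deprecated .dict() call.
        if isinstance(response, dict):
            return response
        return response.model_dump()
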
libs/partners/openai/langchain_openai/chat_models/base.py

Lines changed: 6 additions & 4 deletions

@@ -394,7 +394,7 @@ def _stream(
         default_chunk_class = AIMessageChunk
         for chunk in self.client.create(messages=message_dicts, **params):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -449,10 +449,12 @@ def _create_message_dicts(
         message_dicts = [_convert_message_to_dict(m) for m in messages]
         return message_dicts, params
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         generations = []
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             message = _convert_dict_to_message(res["message"])
             generation_info = dict(finish_reason=res.get("finish_reason"))
@@ -486,7 +488,7 @@ async def _astream(
             messages=message_dicts, **params
         ):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]

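The same guard appears in both the sync and async streaming paths. A small stand-alone sketch of that guard, assuming a stream whose chunks may be Pydantic models and may occasionally carry an empty "choices" list:

    def iter_choices(stream):
        for chunk in stream:
            if not isinstance(chunk, dict):
                chunk = chunk.model_dump()  # normalize Pydantic v2 chunks
            if len(chunk["choices"]) == 0:
                continue  # skip chunks that carry no choice payload
            yield chunk["choices"][0]
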
libs/partners/openai/langchain_openai/embeddings/base.py

Lines changed: 4 additions & 4 deletions

@@ -324,7 +324,7 @@ def _get_len_safe_embeddings(
                 input=tokens[i : i + _chunk_size], **self._invocation_params
             )
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -343,7 +343,7 @@ def _get_len_safe_embeddings(
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
@@ -436,7 +436,7 @@ async def _aget_len_safe_embeddings(
             )
 
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -453,7 +453,7 @@ async def _aget_len_safe_embeddings(
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])

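For context on the unchanged else branch above: when a long input is split into token chunks and embedded piecewise, the chunk vectors are recombined as a token-count-weighted average. A toy example with invented numbers, assuming numpy:

    import numpy as np

    chunk_embeddings = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]  # one vector per chunk
    num_tokens_in_batch = [950, 50]  # tokens contributed by each chunk
    # Longer chunks dominate the combined embedding:
    average = np.average(chunk_embeddings, axis=0, weights=num_tokens_in_batch)
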
libs/partners/openai/langchain_openai/llms/base.py

Lines changed: 4 additions & 4 deletions

@@ -251,7 +251,7 @@ def _stream(
         self.get_sub_prompts(params, [prompt], stop)  # this mutates params
         for stream_resp in self.client.create(prompt=prompt, **params):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -279,7 +279,7 @@ async def _astream(
             prompt=prompt, **params
         ):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -357,7 +357,7 @@ def _generate(
             if not isinstance(response, dict):
                 # V1 client returns the response in an PyDantic object instead of
                 # dict. For the transition period, we deep convert it to dict.
-                response = response.dict()
+                response = response.model_dump()
 
             choices.extend(response["choices"])
             _update_token_usage(_keys, response, token_usage)
@@ -420,7 +420,7 @@ async def _agenerate(
             else:
                 response = await self.async_client.create(prompt=_prompts, **params)
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             choices.extend(response["choices"])
             _update_token_usage(_keys, response, token_usage)
         return self.create_llm_result(

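As the in-code comment notes, the v1 client returns Pydantic objects during the transition period, so each batch response is normalized to a dict before its choices and token usage are accumulated. A hypothetical reduction of that loop (accumulate here is illustrative, not the library's code):

    from collections import Counter

    def accumulate(responses):
        choices, usage = [], Counter()
        for response in responses:
            if not isinstance(response, dict):
                response = response.model_dump()  # v1-client Pydantic object
            choices.extend(response["choices"])
            usage.update(response.get("usage") or {})  # sum token counts per key
        return choices, dict(usage)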