Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions litellm/llms/meta_llama/chat/transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,6 @@
Docs: https://llama.developer.meta.com/docs/features/compatibility/
"""

import warnings

# Suppress Pydantic serialization warnings for Meta Llama responses
warnings.filterwarnings("ignore", message="Pydantic serializer warnings")

from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig


Expand Down
52 changes: 13 additions & 39 deletions litellm/types/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -632,16 +632,18 @@ class Message(OpenAIObject):
role: Literal["assistant", "user", "system", "tool", "function"]
tool_calls: Optional[List[ChatCompletionMessageToolCall]]
function_call: Optional[FunctionCall]
audio: Optional[ChatCompletionAudioResponse] = None
images: Optional[List[ImageURLListItem]] = None
reasoning_content: Optional[str] = None
audio: Optional[ChatCompletionAudioResponse] = Field(default=None, exclude=True)
images: Optional[List[ImageURLListItem]] = Field(default=None, exclude=True)
reasoning_content: Optional[str] = Field(default=None, exclude=True)
thinking_blocks: Optional[
List[Union[ChatCompletionThinkingBlock, ChatCompletionRedactedThinkingBlock]]
] = None
] = Field(default=None, exclude=True)
provider_specific_fields: Optional[Dict[str, Any]] = Field(
default=None, exclude=True
)
annotations: Optional[List[ChatCompletionAnnotation]] = None
annotations: Optional[List[ChatCompletionAnnotation]] = Field(
default=None, exclude=True
)

def __init__(
self,
Expand Down Expand Up @@ -701,32 +703,6 @@ def __init__(
**params,
)

if audio is None:
# delete audio from self
# OpenAI compatible APIs like mistral API will raise an error if audio is passed in
if hasattr(self, "audio"):
del self.audio

if images is None:
if hasattr(self, "images"):
del self.images

if annotations is None:
# ensure default response matches OpenAI spec
# Some OpenAI compatible APIs raise an error if annotations are passed in
if hasattr(self, "annotations"):
del self.annotations

if reasoning_content is None:
# ensure default response matches OpenAI spec
if hasattr(self, "reasoning_content"):
del self.reasoning_content

if thinking_blocks is None:
# ensure default response matches OpenAI spec
if hasattr(self, "thinking_blocks"):
del self.thinking_blocks

add_provider_specific_fields(self, provider_specific_fields)

def get(self, key, default=None):
Expand Down Expand Up @@ -848,9 +824,11 @@ class Choices(OpenAIObject):
finish_reason: str
index: int
message: Message
logprobs: Optional[Union[ChoiceLogprobs, Any]] = None
logprobs: Optional[Union[ChoiceLogprobs, Any]] = Field(default=None, exclude=True)

provider_specific_fields: Optional[Dict[str, Any]] = Field(default=None)
provider_specific_fields: Optional[Dict[str, Any]] = Field(
default=None, exclude=True
)

def __init__(
self,
Expand Down Expand Up @@ -889,12 +867,8 @@ def __init__(
if enhancements is not None:
self.enhancements = enhancements

self.provider_specific_fields = provider_specific_fields

if self.logprobs is None:
del self.logprobs
if self.provider_specific_fields is None:
del self.provider_specific_fields
if provider_specific_fields is not None:
self.provider_specific_fields = provider_specific_fields

def __contains__(self, key):
# Define custom behavior for the 'in' operator
Expand Down
44 changes: 44 additions & 0 deletions tests/test_litellm/test_global_filter_pollution.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""Test demonstrating LiteLLM's global warning filter pollution.

This test demonstrates that the global warning suppression introduced in commit
08b2b4f5f5 (June 19, 2025, PR #11895) affects non-LiteLLM code (e.g., user
application code) as well.

Specifically, litellm/llms/meta_llama/chat/transformation.py:12 installs:
warnings.filterwarnings('ignore', message='Pydantic serializer warnings')

This is a GLOBAL filter that suppresses ANY warning matching that pattern,
regardless of whether it comes from LiteLLM code or user code.

EXPECTED: User warnings should be visible
ACTUAL: User warnings are SUPPRESSED by LiteLLM's global filter (test FAILS)

For full context, see: https://github.com/BerriAI/litellm/issues/11759#issuecomment-3494387017
"""
import sys
import warnings


def user_function_that_generates_warning(context: str):
    """Emit a pydantic-style warning from 'user' code and verify it was recorded.

    catch_warnings(record=True) resets filters to "always", so the warning is
    captured regardless of any globally-installed filters in this process.
    """
    with warnings.catch_warnings(record=True) as captured:
        # Simulate application code raising a warning completely unrelated to LiteLLM.
        warnings.warn(f"Pydantic serializer warnings: context: {context}")

    assert captured, f"User Pydantic warnings should be captured - context: {context}"
    message_text = str(captured[0].message)
    assert "Pydantic serializer warnings" in message_text

def test_1_user_warning_baseline():
    """Baseline: User warnings work without litellm imported.

    Only meaningful when run in isolation before litellm is imported;
    once litellm is already loaded the check is skipped entirely.
    """
    if 'litellm' not in sys.modules:
        user_function_that_generates_warning("baseline")


def test_2_litellm_suppresses_user_warnings():
    """Like the baseline, LiteLLM should not suppress user warnings"""
    # Importing litellm runs its module-level code, which (before the fix)
    # installed a global warnings filter matching "Pydantic serializer warnings".
    import litellm  # noqa: F401

    # After the import, a user's own warning with that message must still be visible.
    user_function_that_generates_warning("after litellm import")
70 changes: 70 additions & 0 deletions tests/test_litellm/test_pydantic_serialization_warnings.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
"""Test demonstrating Pydantic serialization warnings from LiteLLM responses.

LiteLLM's Message and Choices classes use an anti-pattern: deleting fields after
initialization instead of using Field(exclude=True). This causes Pydantic to emit
serialization warnings when model_dump() is called on responses.

The anti-pattern was introduced in multiple commits:
- Message.audio: 13e0b3f626 (Oct 18, 2024) - PR #6304
- Message.thinking_blocks: ab7c4d1a0e (Feb 26, 2025) - PR #8843
- Message.annotations: 44f4c623e2 (Mar 22, 2025)

Instead of fixing the root cause, commit 08b2b4f5f5 (June 19, 2025, PR #11895)
added a GLOBAL warning filter that suppresses these warnings everywhere.

EXPECTED: No warnings when serializing LiteLLM responses
ACTUAL: Warnings ARE generated (but hidden by global filter)

For full context, see: https://github.com/BerriAI/litellm/issues/11759#issuecomment-3494387017
"""
import warnings

from litellm.types.utils import Choices, Message, ModelResponse


def test_response_serialization_warnings():
    """ModelResponse objects generate Pydantic warnings due to 'del self.field' anti-pattern."""
    # Build the response the same way LiteLLM itself would return one.
    inner_message = Message(
        content="test response",
        role="assistant",
        audio=None,  # Triggers del self.audio in Message.__init__
    )
    response = ModelResponse(
        id="test-id",
        choices=[Choices(index=0, message=inner_message)],
        created=1234567890,
        model="test-model",
        object="chat.completion",
    )

    # Explicitly capture warnings, overriding any global filters.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        response.model_dump()

    # Test FAILS when bug exists (warnings generated)
    # Test PASSES when bug is fixed (no warnings)
    assert len(caught) == 0, (
        f"Pydantic serialization warnings detected ({len(caught)} warning(s)).\n"
        "\n"
        f"Warning: {caught[0].message if caught else 'N/A'}\n"
        "\n"
        "Root cause: Message/Choices classes use 'del self.field' anti-pattern in __init__\n"
        "Fix: Use Field(exclude=True) instead of 'del self.field'\n"
        "\n"
        "Problematic fields in litellm/types/utils.py:\n"
        " Message.__init__:\n"
        " - audio (line 708): del self.audio\n"
        " - images (line 712): del self.images\n"
        " - annotations (line 718): del self.annotations\n"
        " - reasoning_content (line 723): del self.reasoning_content\n"
        " - thinking_blocks (line 728): del self.thinking_blocks\n"
        " Choices.__init__:\n"
        " - logprobs (line 897): del self.logprobs\n"
        " - provider_specific_fields (similar pattern)"
    )
54 changes: 54 additions & 0 deletions tests/test_litellm/test_warnings_leak_in_strict_mode.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
"""Test demonstrating that Pydantic warnings leak through as visible output.

LiteLLM's Message and Choices classes use an anti-pattern: deleting fields after
initialization instead of using Field(exclude=True). This was introduced in:
- Message.audio: 13e0b3f626 (Oct 18, 2024) - PR #6304
- Message.thinking_blocks: ab7c4d1a0e (Feb 26, 2025) - PR #8843
- Message.annotations: 44f4c623e2 (Mar 22, 2025)

Instead of fixing the root cause, commit 08b2b4f5f5 (June 19, 2025, PR #11895)
added a global filter at transformation.py:12 to suppress these warnings.

However, the global filter doesn't prevent warnings from being generated - it only
suppresses them. Pytest still collects them and displays them in the "warnings
summary" section at the end of test runs, polluting test output.

Run this test with: pytest tests/test_litellm/test_warnings_leak_in_strict_mode.py -xvs
You'll see warnings in the output even though tests pass (without this fix).

For full context, see: https://github.com/BerriAI/litellm/issues/11759#issuecomment-3494387017
"""
from litellm.types.utils import Choices, Message, ModelResponse


def test_serialization_produces_visible_warnings(recwarn):
    """Serializing a ModelResponse must not emit warnings that pytest collects.

    Uses pytest's `recwarn` fixture: any warning generated during model_dump()
    is recorded and would otherwise surface in the warnings summary.
    """
    msg = Message(
        content="test response",
        role="assistant",
        audio=None,  # Triggers del self.audio
    )
    response = ModelResponse(
        id="test-id",
        choices=[Choices(index=0, message=msg)],
        created=1234567890,
        model="test-model",
        object="chat.completion",
    )

    # Just do a normal model_dump
    response.model_dump()

    # Check if pytest collected any warnings
    collected = list(recwarn)

    # Test FAILS if warnings were collected (leaked through despite global filter)
    # Test PASSES if no warnings (fix applied, no warnings generated)
    assert len(collected) == 0, (
        f"Serialization produced {len(collected)} warning(s) that leak into pytest output.\n"
        "\n"
        f"Warning message: {collected[0].message if collected else 'N/A'}"
    )
Loading