From 581beaf85c1ad9f9a3fc083bcaef62705890d631 Mon Sep 17 00:00:00 2001
From: Sameer Kankute
Date: Thu, 6 Nov 2025 15:30:53 +0530
Subject: [PATCH 1/5] Add xAI Responses API support

---
 litellm/__init__.py                           |   1 +
 litellm/llms/xai/responses/__init__.py        |   5 +
 litellm/llms/xai/responses/transformation.py  | 146 ++++++++++++++++++
 litellm/utils.py                              |   2 +
 .../llms/xai/responses/__init__.py            |   2 +
 .../llms/xai/responses/test_transformation.py | 112 ++++++++++++++
 6 files changed, 268 insertions(+)
 create mode 100644 litellm/llms/xai/responses/__init__.py
 create mode 100644 litellm/llms/xai/responses/transformation.py
 create mode 100644 tests/test_litellm/llms/xai/responses/__init__.py
 create mode 100644 tests/test_litellm/llms/xai/responses/test_transformation.py

diff --git a/litellm/__init__.py b/litellm/__init__.py
index 29d83415e776..20fe4a2aeacd 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -1236,6 +1236,7 @@ def add_known_models():
 from .llms.azure.responses.o_series_transformation import (
     AzureOpenAIOSeriesResponsesAPIConfig,
 )
+from .llms.xai.responses.transformation import XAIResponsesAPIConfig
 from .llms.litellm_proxy.responses.transformation import (
     LiteLLMProxyResponsesAPIConfig,
 )
diff --git a/litellm/llms/xai/responses/__init__.py b/litellm/llms/xai/responses/__init__.py
new file mode 100644
index 000000000000..9610a1197875
--- /dev/null
+++ b/litellm/llms/xai/responses/__init__.py
@@ -0,0 +1,5 @@
+# XAI Responses API
+from .transformation import XAIResponsesAPIConfig
+
+__all__ = ["XAIResponsesAPIConfig"]
+
diff --git a/litellm/llms/xai/responses/transformation.py b/litellm/llms/xai/responses/transformation.py
new file mode 100644
index 000000000000..5767177b7a64
--- /dev/null
+++ b/litellm/llms/xai/responses/transformation.py
@@ -0,0 +1,146 @@
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+import litellm
+from litellm._logging import verbose_logger
+from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import ResponsesAPIOptionalRequestParams
+from litellm.types.router import GenericLiteLLMParams
+from litellm.types.utils import LlmProviders
+
+if TYPE_CHECKING:
+    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
+
+    LiteLLMLoggingObj = _LiteLLMLoggingObj
+else:
+    LiteLLMLoggingObj = Any
+
+XAI_API_BASE = "https://api.x.ai/v1"
+
+
+class XAIResponsesAPIConfig(OpenAIResponsesAPIConfig):
+    """
+    Configuration for XAI's Responses API.
+
+    Inherits from OpenAIResponsesAPIConfig since XAI's Responses API is largely
+    compatible with OpenAI's, with a few differences:
+    - Does not support the 'instructions' parameter
+    - Requires the 'container' field to be removed from code_interpreter tools
+    - Recommends store=false when sending images
+
+    Reference: https://docs.x.ai/docs/api-reference#create-new-response
+    """
+
+    @property
+    def custom_llm_provider(self) -> LlmProviders:
+        return LlmProviders.XAI
+
+    def get_supported_openai_params(self, model: str) -> list:
+        """
+        Get supported parameters for XAI Responses API.
+
+        XAI supports most OpenAI Responses API params except 'instructions'.
+        """
+        supported_params = super().get_supported_openai_params(model)
+
+        # Remove 'instructions' as it's not supported by XAI
+        if "instructions" in supported_params:
+            supported_params.remove("instructions")
+
+        return supported_params
+
+    def map_openai_params(
+        self,
+        response_api_optional_params: ResponsesAPIOptionalRequestParams,
+        model: str,
+        drop_params: bool,
+    ) -> Dict:
+        """
+        Map parameters for XAI Responses API.
+
+        Handles XAI-specific transformations:
+        1. Drops 'instructions' parameter (not supported)
+        2. Transforms code_interpreter tools to remove 'container' field
+        3. Note: XAI recommends store=false when sending images (not set here)
+        """
+        params = dict(response_api_optional_params)
+
+        # Drop instructions parameter (not supported by XAI)
+        if "instructions" in params:
+            verbose_logger.debug(
+                "XAI Responses API does not support 'instructions' parameter. Dropping it."
+            )
+            params.pop("instructions")
+
+        # Transform code_interpreter tools - remove container field
+        if "tools" in params and params["tools"]:
+            tools_list = params["tools"]
+            # Ensure tools is a list for iteration
+            if not isinstance(tools_list, list):
+                tools_list = [tools_list]
+
+            transformed_tools: List[Any] = []
+            for tool in tools_list:
+                if isinstance(tool, dict) and tool.get("type") == "code_interpreter":
+                    # XAI supports code_interpreter but doesn't use the container field
+                    # Keep only the type field
+                    verbose_logger.debug(
+                        f"XAI: Transforming code_interpreter tool, removing container field"
+                    )
+                    transformed_tools.append({"type": "code_interpreter"})
+                else:
+                    transformed_tools.append(tool)
+            params["tools"] = transformed_tools
+
+        return params
+
+    def validate_environment(
+        self, headers: dict, model: str, litellm_params: Optional[GenericLiteLLMParams]
+    ) -> dict:
+        """
+        Validate environment and set up headers for XAI API.
+
+        Uses XAI_API_KEY from environment or litellm_params.
+        """
+        litellm_params = litellm_params or GenericLiteLLMParams()
+        api_key = (
+            litellm_params.api_key
+            or litellm.api_key
+            or get_secret_str("XAI_API_KEY")
+        )
+
+        if not api_key:
+            raise ValueError(
+                "XAI API key is required. Set XAI_API_KEY environment variable or pass api_key parameter."
+            )
+
+        headers.update(
+            {
+                "Authorization": f"Bearer {api_key}",
+            }
+        )
+        return headers
+
+    def get_complete_url(
+        self,
+        api_base: Optional[str],
+        litellm_params: dict,
+    ) -> str:
+        """
+        Get the complete URL for XAI Responses API endpoint.
+
+        Returns:
+            str: The full URL for the XAI /responses endpoint
+        """
+        api_base = (
+            api_base
+            or litellm.api_base
+            or get_secret_str("XAI_API_BASE")
+            or XAI_API_BASE
+        )
+
+        # Remove trailing slashes
+        api_base = api_base.rstrip("/")
+
+        return f"{api_base}/responses"
diff --git a/litellm/utils.py b/litellm/utils.py
index e924543df228..f487adb5fcd0 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -7363,6 +7363,8 @@ def get_provider_responses_api_config(
             return litellm.AzureOpenAIResponsesAPIConfig()
         elif litellm.LlmProviders.LITELLM_PROXY == provider:
             return litellm.LiteLLMProxyResponsesAPIConfig()
+        elif litellm.LlmProviders.XAI == provider:
+            return litellm.XAIResponsesAPIConfig()
         return None

     @staticmethod
diff --git a/tests/test_litellm/llms/xai/responses/__init__.py b/tests/test_litellm/llms/xai/responses/__init__.py
new file mode 100644
index 000000000000..451d016fb21d
--- /dev/null
+++ b/tests/test_litellm/llms/xai/responses/__init__.py
@@ -0,0 +1,2 @@
+# XAI Responses API tests
+
diff --git a/tests/test_litellm/llms/xai/responses/test_transformation.py b/tests/test_litellm/llms/xai/responses/test_transformation.py
new file mode 100644
index 000000000000..c0871d3b9b7e
--- /dev/null
+++ b/tests/test_litellm/llms/xai/responses/test_transformation.py
@@ -0,0 +1,112 @@
+"""
+Tests for XAI Responses API transformation
+
+Tests the XAIResponsesAPIConfig class that handles XAI-specific
+transformations for the Responses API.
+
+Source: litellm/llms/xai/responses/transformation.py
+"""
+import sys
+import os
+
+sys.path.insert(0, os.path.abspath("../../../../.."))
+
+import pytest
+from litellm.types.utils import LlmProviders
+from litellm.utils import ProviderConfigManager
+from litellm.llms.xai.responses.transformation import XAIResponsesAPIConfig
+from litellm.types.llms.openai import ResponsesAPIOptionalRequestParams
+
+
+class TestXAIResponsesAPITransformation:
+    """Test XAI Responses API configuration and transformations"""
+
+    def test_xai_provider_config_registration(self):
+        """Test that XAI provider returns XAIResponsesAPIConfig"""
+        config = ProviderConfigManager.get_provider_responses_api_config(
+            model="xai/grok-4-fast",
+            provider=LlmProviders.XAI,
+        )
+
+        assert config is not None, "Config should not be None for XAI provider"
+        assert isinstance(
+            config, XAIResponsesAPIConfig
+        ), f"Expected XAIResponsesAPIConfig, got {type(config)}"
+        assert (
+            config.custom_llm_provider == LlmProviders.XAI
+        ), "custom_llm_provider should be XAI"
+
+    def test_code_interpreter_container_field_removed(self):
+        """Test that container field is removed from code_interpreter tools"""
+        config = XAIResponsesAPIConfig()
+
+        params = ResponsesAPIOptionalRequestParams(
+            tools=[
+                {
+                    "type": "code_interpreter",
+                    "container": {"type": "auto"}
+                }
+            ]
+        )
+
+        result = config.map_openai_params(
+            response_api_optional_params=params,
+            model="grok-4-fast",
+            drop_params=False
+        )
+
+        assert "tools" in result
+        assert len(result["tools"]) == 1
+        assert result["tools"][0]["type"] == "code_interpreter"
+        assert "container" not in result["tools"][0], "Container field should be removed"
+
+    def test_instructions_parameter_dropped(self):
+        """Test that instructions parameter is dropped for XAI"""
+        config = XAIResponsesAPIConfig()
+
+        params = ResponsesAPIOptionalRequestParams(
+            instructions="You are a helpful assistant.",
+            temperature=0.7
+        )
+
+        result = config.map_openai_params(
+            response_api_optional_params=params,
+            model="grok-4-fast",
+            drop_params=False
+        )
+
+        assert "instructions" not in result, "Instructions should be dropped"
+        assert result.get("temperature") == 0.7, "Other params should be preserved"
+
+    def test_supported_params_excludes_instructions(self):
+        """Test that get_supported_openai_params excludes instructions"""
+        config = XAIResponsesAPIConfig()
+        supported = config.get_supported_openai_params("grok-4-fast")
+
+        assert "instructions" not in supported, "instructions should not be supported"
+        assert "tools" in supported, "tools should be supported"
+        assert "temperature" in supported, "temperature should be supported"
+        assert "model" in supported, "model should be supported"
+
+    def test_xai_responses_endpoint_url(self):
+        """Test that get_complete_url returns correct XAI endpoint"""
+        config = XAIResponsesAPIConfig()
+
+        # Test with default XAI API base
+        url = config.get_complete_url(api_base=None, litellm_params={})
+        assert url == "https://api.x.ai/v1/responses", f"Expected XAI responses endpoint, got {url}"
+
+        # Test with custom api_base
+        custom_url = config.get_complete_url(
+            api_base="https://custom.x.ai/v1",
+            litellm_params={}
+        )
+        assert custom_url == "https://custom.x.ai/v1/responses", f"Expected custom endpoint, got {custom_url}"
+
+        # Test with trailing slash
+        url_with_slash = config.get_complete_url(
+            api_base="https://api.x.ai/v1/",
+            litellm_params={}
+        )
+        assert url_with_slash == "https://api.x.ai/v1/responses", "Should handle trailing slash"
+
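Applied on its own, PATCH 1/5 is self-contained. A minimal sketch of the resulting behavior, using only the modules added above (the grok-4-fast model name mirrors the tests and is illustrative; the endpoint assert assumes XAI_API_BASE and litellm.api_base are unset):

    from litellm.llms.xai.responses.transformation import XAIResponsesAPIConfig
    from litellm.types.llms.openai import ResponsesAPIOptionalRequestParams

    config = XAIResponsesAPIConfig()

    # 'instructions' is unsupported by xAI and its code_interpreter tool does
    # not use the 'container' field, so map_openai_params strips both.
    params = ResponsesAPIOptionalRequestParams(
        instructions="You are a helpful assistant.",
        tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
    )
    mapped = config.map_openai_params(
        response_api_optional_params=params,
        model="grok-4-fast",
        drop_params=False,
    )
    assert mapped == {"tools": [{"type": "code_interpreter"}]}

    # Endpoint resolution falls back to the public xAI base URL.
    assert config.get_complete_url(api_base=None, litellm_params={}) == (
        "https://api.x.ai/v1/responses"
    )
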
"instructions" not in result, "Instructions should be dropped" + assert result.get("temperature") == 0.7, "Other params should be preserved" + + def test_supported_params_excludes_instructions(self): + """Test that get_supported_openai_params excludes instructions""" + config = XAIResponsesAPIConfig() + supported = config.get_supported_openai_params("grok-4-fast") + + assert "instructions" not in supported, "instructions should not be supported" + assert "tools" in supported, "tools should be supported" + assert "temperature" in supported, "temperature should be supported" + assert "model" in supported, "model should be supported" + + def test_xai_responses_endpoint_url(self): + """Test that get_complete_url returns correct XAI endpoint""" + config = XAIResponsesAPIConfig() + + # Test with default XAI API base + url = config.get_complete_url(api_base=None, litellm_params={}) + assert url == "https://api.x.ai/v1/responses", f"Expected XAI responses endpoint, got {url}" + + # Test with custom api_base + custom_url = config.get_complete_url( + api_base="https://custom.x.ai/v1", + litellm_params={} + ) + assert custom_url == "https://custom.x.ai/v1/responses", f"Expected custom endpoint, got {custom_url}" + + # Test with trailing slash + url_with_slash = config.get_complete_url( + api_base="https://api.x.ai/v1/", + litellm_params={} + ) + assert url_with_slash == "https://api.x.ai/v1/responses", "Should handle trailing slash" + From df882240a7ae8e9abc7f9bf3f7d7458cffa45f18 Mon Sep 17 00:00:00 2001 From: Sameer Kankute Date: Sat, 8 Nov 2025 12:14:43 +0530 Subject: [PATCH 2/5] add the xai provider config above --- litellm/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/utils.py b/litellm/utils.py index f487adb5fcd0..ce47a94e9145 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -7361,10 +7361,10 @@ def get_provider_responses_api_config( return litellm.AzureOpenAIOSeriesResponsesAPIConfig() else: return litellm.AzureOpenAIResponsesAPIConfig() - elif litellm.LlmProviders.LITELLM_PROXY == provider: - return litellm.LiteLLMProxyResponsesAPIConfig() elif litellm.LlmProviders.XAI == provider: return litellm.XAIResponsesAPIConfig() + elif litellm.LlmProviders.LITELLM_PROXY == provider: + return litellm.LiteLLMProxyResponsesAPIConfig() return None @staticmethod From eee252636090d58b8ee7ae028764e6757c319d7e Mon Sep 17 00:00:00 2001 From: Sameer Kankute Date: Sat, 8 Nov 2025 12:38:15 +0530 Subject: [PATCH 3/5] remove init file --- litellm/llms/xai/responses/__init__.py | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 litellm/llms/xai/responses/__init__.py diff --git a/litellm/llms/xai/responses/__init__.py b/litellm/llms/xai/responses/__init__.py deleted file mode 100644 index 9610a1197875..000000000000 --- a/litellm/llms/xai/responses/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# XAI Responses API -from .transformation import XAIResponsesAPIConfig - -__all__ = ["XAIResponsesAPIConfig"] - From a512d134696d58f9714bdbf40856e2758fae7457 Mon Sep 17 00:00:00 2001 From: Sameer Kankute Date: Sat, 8 Nov 2025 12:41:54 +0530 Subject: [PATCH 4/5] remove init file --- tests/test_litellm/llms/xai/responses/__init__.py | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 tests/test_litellm/llms/xai/responses/__init__.py diff --git a/tests/test_litellm/llms/xai/responses/__init__.py b/tests/test_litellm/llms/xai/responses/__init__.py deleted file mode 100644 index 451d016fb21d..000000000000 --- a/tests/test_litellm/llms/xai/responses/__init__.py +++ /dev/null @@ 
From 67fbde5a650e5a28276ce5b2da795ae5ddcc6f Mon Sep 17 00:00:00 2001
From: Sameer Kankute
Date: Sat, 8 Nov 2025 13:06:37 +0530
Subject: [PATCH 5/5] Fix f-string lint error

---
 litellm/llms/xai/responses/transformation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/litellm/llms/xai/responses/transformation.py b/litellm/llms/xai/responses/transformation.py
index 5767177b7a64..bd422c8d81e0 100644
--- a/litellm/llms/xai/responses/transformation.py
+++ b/litellm/llms/xai/responses/transformation.py
@@ -85,7 +85,7 @@ def map_openai_params(
                     # XAI supports code_interpreter but doesn't use the container field
                     # Keep only the type field
                     verbose_logger.debug(
-                        f"XAI: Transforming code_interpreter tool, removing container field"
+                        "XAI: Transforming code_interpreter tool, removing container field"
                     )
                     transformed_tools.append({"type": "code_interpreter"})
                 else:
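For completeness, a hedged end-to-end sketch of how the series might be exercised once applied. It assumes litellm.responses() routes xai/-prefixed models through XAIResponsesAPIConfig via the lookup above; the API key and model name are placeholders, and xAI-side behavior is not verified here:

    import os
    import litellm

    os.environ["XAI_API_KEY"] = "xai-..."  # placeholder; read by validate_environment

    response = litellm.responses(
        model="xai/grok-4-fast",  # illustrative; same model name as the tests
        input="Use the code interpreter to compute 2**10.",
        tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
        # map_openai_params strips 'container' (and would drop 'instructions')
        # before the request is sent to https://api.x.ai/v1/responses.
    )
    print(response)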