diff --git a/docs/static/generated/openai_responses_generated.py b/docs/static/generated/openai_responses_generated.py new file mode 100644 index 0000000000..ec604615df --- /dev/null +++ b/docs/static/generated/openai_responses_generated.py @@ -0,0 +1,5019 @@ +# NOTE: This file is auto-generated by scripts/generate_openai_responses.py +# Do not edit by hand. + +from typing import Annotated, Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Union + +from pydantic import BaseModel, Field +from typing_extensions import Literal, NotRequired, Required, TypeAlias, TypedDict + +from llama_stack.schema_utils import json_schema_type, register_schema +OpenAIMetadata: TypeAlias = Dict[str, str] + + + +class OpenAICodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): + """ + :param type: Always `auto`. + :param file_ids: An optional list of uploaded files to make available to your code. +""" + type: Required[Literal["auto"]] + + file_ids: Sequence[str] + + +OpenAICodeInterpreterContainer: TypeAlias = Union[str, OpenAICodeInterpreterContainerCodeInterpreterToolAuto] + + + +class OpenAICodeInterpreter(TypedDict, total=False): + """ + :param container: The code interpreter container. + + Can be a container ID or an object that specifies uploaded file IDs to make + available to your code. + :param type: The type of the code interpreter tool. Always `code_interpreter`. +""" + container: Required[OpenAICodeInterpreterContainer] + + type: Required[Literal["code_interpreter"]] + + + + +class OpenAILocalShell(TypedDict, total=False): + """ + :param type: The type of the local shell tool. Always `local_shell`. +""" + type: Required[Literal["local_shell"]] + + + + +class OpenAIRankingOptions(TypedDict, total=False): + """ + :param ranker: The ranker to use for the file search. + :param score_threshold: The score threshold for the file search, a number between 0 and 1. + + Numbers closer to 1 will attempt to return only the most relevant results, but + may return fewer results. +""" + ranker: Literal["auto", "default-2024-11-15"] + + score_threshold: float + + + + +class OpenAIComparisonFilter(TypedDict, total=False): + """ + :param key: The key to compare against the value. + :param type: Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, + `nin`. + + - `eq`: equals + - `ne`: not equal + - `gt`: greater than + - `gte`: greater than or equal + - `lt`: less than + - `lte`: less than or equal + - `in`: in + - `nin`: not in + :param value: The value to compare against the attribute key; supports string, number, or + boolean types. +""" + key: Required[str] + + type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]] + + value: Required[Union[str, float, bool, Sequence[Union[str, float]]]] + + +OpenAIFilter: TypeAlias = Union[OpenAIComparisonFilter, object] + + + +class OpenAICompoundFilter(TypedDict, total=False): + """ + :param filters: Array of filters to combine. + + Items can be `ComparisonFilter` or `CompoundFilter`. + :param type: Type of operation: `and` or `or`. +""" + filters: Required[Iterable[OpenAIFilter]] + + type: Required[Literal["and", "or"]] + + +OpenAIFilters: TypeAlias = Union[OpenAIComparisonFilter, OpenAICompoundFilter] + + + +class OpenAIFileSearchToolParam(TypedDict, total=False): + """ + :param type: The type of the file search tool. Always `file_search`. + :param vector_store_ids: The IDs of the vector stores to search. + :param filters: A filter to apply. + :param max_num_results: The maximum number of results to return. 
+ + This number should be between 1 and 50 inclusive. + :param ranking_options: Ranking options for search. +""" + type: Required[Literal["file_search"]] + + vector_store_ids: Required[Sequence[str]] + + filters: Optional[OpenAIFilters] + + max_num_results: int + + ranking_options: OpenAIRankingOptions + + + + +class OpenAIImageGenerationInputImageMask(TypedDict, total=False): + """ + :param file_id: File ID for the mask image. + :param image_url: Base64-encoded mask image. +""" + file_id: str + + image_url: str + + + + +class OpenAIImageGeneration(TypedDict, total=False): + """ + :param type: The type of the image generation tool. Always `image_generation`. + :param background: Background type for the generated image. + + One of `transparent`, `opaque`, or `auto`. Default: `auto`. + :param input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and + `low`. Defaults to `low`. + :param input_image_mask: Optional mask for inpainting. + + Contains `image_url` (string, optional) and `file_id` (string, optional). + :param model: The image generation model to use. Default: `gpt-image-1`. + :param moderation: Moderation level for the generated image. Default: `auto`. + :param output_compression: Compression level for the output image. Default: 100. + :param output_format: The output format of the generated image. + + One of `png`, `webp`, or `jpeg`. Default: `png`. + :param partial_images: Number of partial images to generate in streaming mode, from 0 (default value) + to 3. + :param quality: The quality of the generated image. + + One of `low`, `medium`, `high`, or `auto`. Default: `auto`. + :param size: The size of the generated image. + + One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`. +""" + type: Required[Literal["image_generation"]] + + background: Literal["transparent", "opaque", "auto"] + + input_fidelity: Optional[Literal["high", "low"]] + + input_image_mask: OpenAIImageGenerationInputImageMask + + model: Literal["gpt-image-1", "gpt-image-1-mini"] + + moderation: Literal["auto", "low"] + + output_compression: int + + output_format: Literal["png", "webp", "jpeg"] + + partial_images: int + + quality: Literal["low", "medium", "high", "auto"] + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + + + + +class OpenAIUserLocation(TypedDict, total=False): + """ + :param type: The type of location approximation. Always `approximate`. + :param city: Free text input for the city of the user, e.g. `San Francisco`. + :param country: The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + :param region: Free text input for the region of the user, e.g. `California`. + :param timezone: The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. +""" + type: Required[Literal["approximate"]] + + city: Optional[str] + + country: Optional[str] + + region: Optional[str] + + timezone: Optional[str] + + + + +class OpenAIWebSearchPreviewToolParam(TypedDict, total=False): + """ + :param type: The type of the web search tool. + + One of `web_search_preview` or `web_search_preview_2025_03_11`. + :param search_context_size: High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. 
+ :param user_location: The user's location. +""" + type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] + + search_context_size: Literal["low", "medium", "high"] + + user_location: Optional[OpenAIUserLocation] + + + + +class OpenAIUserLocation(TypedDict, total=False): + """ + :param city: Free text input for the city of the user, e.g. `San Francisco`. + :param country: The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + :param region: Free text input for the region of the user, e.g. `California`. + :param timezone: The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + :param type: The type of location approximation. Always `approximate`. +""" + city: Optional[str] + + country: Optional[str] + + region: Optional[str] + + timezone: Optional[str] + + type: Literal["approximate"] + + + + +class OpenAIFilters(TypedDict, total=False): + """ + :param allowed_domains: Allowed domains for the search. + + If not provided, all domains are allowed. Subdomains of the provided domains are + allowed as well. + + Example: `["pubmed.ncbi.nlm.nih.gov"]` +""" + allowed_domains: Optional[Sequence[str]] + + + + +class OpenAIWebSearchToolParam(TypedDict, total=False): + """ + :param type: The type of the web search tool. + + One of `web_search` or `web_search_2025_08_26`. + :param filters: Filters for the search. + :param search_context_size: High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. + :param user_location: The approximate location of the user. +""" + type: Required[Literal["web_search", "web_search_2025_08_26"]] + + filters: Optional[OpenAIFilters] + + search_context_size: Literal["low", "medium", "high"] + + user_location: Optional[OpenAIUserLocation] + + + + +class OpenAIMcpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """ + :param read_only: Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + :param tool_names: List of allowed tool names. +""" + read_only: bool + + tool_names: Sequence[str] + + + + +class OpenAIMcpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """ + :param read_only: Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + :param tool_names: List of allowed tool names. +""" + read_only: bool + + tool_names: Sequence[str] + + + + +class OpenAIMcpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """ + :param always: A filter object to specify which tools are allowed. + :param never: A filter object to specify which tools are allowed. +""" + always: OpenAIMcpRequireApprovalMcpToolApprovalFilterAlways + + never: OpenAIMcpRequireApprovalMcpToolApprovalFilterNever + + +OpenAIMcpRequireApproval: TypeAlias = Union[OpenAIMcpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]] + + + +class OpenAIMcpAllowedToolsMcpToolFilter(TypedDict, total=False): + """ + :param read_only: Indicates whether or not a tool modifies data or is read-only. 
+ + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + :param tool_names: List of allowed tool names. +""" + read_only: bool + + tool_names: Sequence[str] + + +OpenAIMcpAllowedTools: TypeAlias = Union[Sequence[str], OpenAIMcpAllowedToolsMcpToolFilter] + + + +class OpenAIMcp(TypedDict, total=False): + """ + :param server_label: A label for this MCP server, used to identify it in tool calls. + :param type: The type of the MCP tool. Always `mcp`. + :param allowed_tools: List of allowed tool names or a filter object. + :param authorization: An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + :param connector_id: Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + :param headers: Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + :param require_approval: Specify which of the MCP server's tools require approval. + :param server_description: Optional description of the MCP server, used to provide more context. + :param server_url: The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. +""" + server_label: Required[str] + + type: Required[Literal["mcp"]] + + allowed_tools: Optional[OpenAIMcpAllowedTools] + + authorization: str + + connector_id: Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + + headers: Optional[Dict[str, str]] + + require_approval: Optional[OpenAIMcpRequireApproval] + + server_description: str + + server_url: str + + + + +class OpenAIComputerToolParam(TypedDict, total=False): + """ + :param display_height: The height of the computer display. + :param display_width: The width of the computer display. + :param environment: The type of computer environment to control. + :param type: The type of the computer use tool. Always `computer_use_preview`. +""" + display_height: Required[int] + + display_width: Required[int] + + environment: Required[Literal["windows", "mac", "linux", "ubuntu", "browser"]] + + type: Required[Literal["computer_use_preview"]] + + + + +class OpenAIGrammar(TypedDict, total=False): + """ + :param definition: The grammar definition. + :param syntax: The syntax of the grammar definition. One of `lark` or `regex`. + :param type: Grammar format. Always `grammar`. +""" + definition: Required[str] + + syntax: Required[Literal["lark", "regex"]] + + type: Required[Literal["grammar"]] + + + + +class OpenAIText(TypedDict, total=False): + """ + :param type: Unconstrained text format. Always `text`. 
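+
+    An illustrative value for this format object:
+
+    ```json
+    { "type": "text" }
+    ```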
+""" + type: Required[Literal["text"]] + + +OpenAICustomToolInputFormat: TypeAlias = Union[OpenAIText, OpenAIGrammar] + + + +class OpenAICustomToolParam(TypedDict, total=False): + """ + :param name: The name of the custom tool, used to identify it in tool calls. + :param type: The type of the custom tool. Always `custom`. + :param description: Optional description of the custom tool, used to provide more context. + :param format: The input format for the custom tool. Default is unconstrained text. +""" + name: Required[str] + + type: Required[Literal["custom"]] + + description: str + + format: OpenAICustomToolInputFormat + + + + +class OpenAIFunctionToolParam(TypedDict, total=False): + """ + :param name: The name of the function to call. + :param parameters: A JSON schema object describing the parameters of the function. + :param strict: Whether to enforce strict parameter validation. Default `true`. + :param type: The type of the function tool. Always `function`. + :param description: A description of the function. + + Used by the model to determine whether or not to call the function. +""" + name: Required[str] + + parameters: Required[Optional[Dict[str, object]]] + + strict: Required[Optional[bool]] + + type: Required[Literal["function"]] + + description: Optional[str] + + +OpenAIToolParam: TypeAlias = Union[ + OpenAIFunctionToolParam, + OpenAIFileSearchToolParam, + OpenAIComputerToolParam, + OpenAIWebSearchToolParam, + OpenAIMcp, + OpenAICodeInterpreter, + OpenAIImageGeneration, + OpenAILocalShell, + OpenAICustomToolParam, + OpenAIWebSearchPreviewToolParam, +] + + + +class OpenAIResponseConversationParam(TypedDict, total=False): + """ + :param id: The unique ID of the conversation. +""" + id: Required[str] + + +OpenAIConversation: TypeAlias = Union[str, OpenAIResponseConversationParam] + +OpenAIResponseIncludable: TypeAlias = Literal[ + "code_interpreter_call.outputs", + "computer_call_output.output.image_url", + "file_search_call.results", + "message.input_image.image_url", + "message.output_text.logprobs", + "reasoning.encrypted_content", +] + + + +class OpenAIResponseFormatJSONObject(TypedDict, total=False): + """ + :param type: The type of response format being defined. Always `json_object`. +""" + type: Required[Literal["json_object"]] + + + + +class OpenAIResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False): + """ + :param name: The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + :param schema: The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + :param type: The type of response format being defined. Always `json_schema`. + :param description: A description of what the response format is for, used by the model to determine + how to respond in the format. + :param strict: Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +""" + name: Required[str] + + schema: Required[Dict[str, object]] + + type: Required[Literal["json_schema"]] + + description: str + + strict: Optional[bool] + + + + +class OpenAIResponseFormatText(TypedDict, total=False): + """ + :param type: The type of response format being defined. 
Always `text`. +""" + type: Required[Literal["text"]] + + +OpenAIResponseFormatTextConfigParam: TypeAlias = Union[ + OpenAIResponseFormatText, OpenAIResponseFormatTextJSONSchemaConfigParam, OpenAIResponseFormatJSONObject +] + + + +class OpenAIResponseTextConfigParam(TypedDict, total=False): + """ + :param format: An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + :param verbosity: Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. +""" + format: OpenAIResponseFormatTextConfigParam + + verbosity: Optional[Literal["low", "medium", "high"]] + + +OpenAIChatModel: TypeAlias = Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4.1-2025-04-14", + "gpt-4.1-mini-2025-04-14", + "gpt-4.1-nano-2025-04-14", + "o4-mini", + "o4-mini-2025-04-16", + "o3", + "o3-2025-04-16", + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-audio-preview-2025-06-03", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "codex-mini-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", +] + +OpenAIResponsesModel: TypeAlias = Union[ + str, + OpenAIChatModel, + Literal[ + "o1-pro", + "o1-pro-2025-03-19", + "o3-pro", + "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", + "computer-use-preview", + "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", + ], +] + + + +class OpenAIResponseInputImageParam(TypedDict, total=False): + """ + :param detail: The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + :param type: The type of the input item. Always `input_image`. + :param file_id: The ID of the file to be sent to the model. 
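+    For example, `file-abc123` (an illustrative placeholder ID).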
+ :param image_url: The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. +""" + detail: Required[Literal["low", "high", "auto"]] + + type: Required[Literal["input_image"]] + + file_id: Optional[str] + + image_url: Optional[str] + + + + +class OpenAIResponseInputFileParam(TypedDict, total=False): + """ + :param type: The type of the input item. Always `input_file`. + :param file_data: The content of the file to be sent to the model. + :param file_id: The ID of the file to be sent to the model. + :param file_url: The URL of the file to be sent to the model. + :param filename: The name of the file to be sent to the model. +""" + type: Required[Literal["input_file"]] + + file_data: str + + file_id: Optional[str] + + file_url: str + + filename: str + + + + +class OpenAIResponseInputTextParam(TypedDict, total=False): + """ + :param text: The text input to the model. + :param type: The type of the input item. Always `input_text`. +""" + text: Required[str] + + type: Required[Literal["input_text"]] + + +OpenAIVariables: TypeAlias = Union[str, OpenAIResponseInputTextParam, OpenAIResponseInputImageParam, OpenAIResponseInputFileParam] + + + +class OpenAIResponsePromptParam(TypedDict, total=False): + """ + :param id: The unique identifier of the prompt template to use. + :param variables: Optional map of values to substitute in for variables in your prompt. + + The substitution values can either be strings, or other Response input types + like images or files. + :param version: Optional version of the prompt template. +""" + id: Required[str] + + variables: Optional[Dict[str, OpenAIVariables]] + + version: Optional[str] + + + + +class OpenAIStreamOptions(TypedDict, total=False): + """ + :param include_obfuscation: When true, stream obfuscation will be enabled. + + Stream obfuscation adds random characters to an `obfuscation` field on streaming + delta events to normalize payload sizes as a mitigation to certain side-channel + attacks. These obfuscation fields are included by default, but add a small + amount of overhead to the data stream. You can set `include_obfuscation` to + false to optimize for bandwidth if you trust the network links between your + application and the OpenAI API. +""" + include_obfuscation: bool + + + + +class OpenAIToolChoiceCustomParam(TypedDict, total=False): + """ + :param name: The name of the custom tool to call. + :param type: For custom tool calling, the type is always `custom`. +""" + name: Required[str] + + type: Required[Literal["custom"]] + + + + +class OpenAIToolChoiceAllowedParam(TypedDict, total=False): + """ + :param mode: Constrains the tools available to the model to a pre-defined set. + + `auto` allows the model to pick from among the allowed tools and generate a + message. + + `required` requires the model to call one or more of the allowed tools. + :param tools: A list of tool definitions that the model should be allowed to call. + + For the Responses API, the list of tool definitions might look like: + + ```json + [ + { "type": "function", "name": "get_weather" }, + { "type": "mcp", "server_label": "deepwiki" }, + { "type": "image_generation" } + ] + ``` + :param type: Allowed tool configuration type. Always `allowed_tools`. 
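+
+    A complete allowed-tools choice might look like this (an illustrative
+    sketch reusing the tool names above):
+
+    ```json
+    {
+      "type": "allowed_tools",
+      "mode": "auto",
+      "tools": [{ "type": "function", "name": "get_weather" }]
+    }
+    ```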
+""" + mode: Required[Literal["auto", "required"]] + + tools: Required[Iterable[Dict[str, object]]] + + type: Required[Literal["allowed_tools"]] + + +OpenAIToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"] + + + +class OpenAIToolChoiceMcpParam(TypedDict, total=False): + """ + :param server_label: The label of the MCP server to use. + :param type: For MCP tools, the type is always `mcp`. + :param name: The name of the tool to call on the server. +""" + server_label: Required[str] + + type: Required[Literal["mcp"]] + + name: Optional[str] + + + + +class OpenAIToolChoiceTypesParam(TypedDict, total=False): + """ + :param type: The type of hosted tool the model should to use. + + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + + Allowed values are: + + - `file_search` + - `web_search_preview` + - `computer_use_preview` + - `code_interpreter` + - `image_generation` +""" + type: Required[ + Literal[ + "file_search", + "web_search_preview", + "computer_use_preview", + "web_search_preview_2025_03_11", + "image_generation", + "code_interpreter", + ] + ] + + + + +class OpenAIToolChoiceFunctionParam(TypedDict, total=False): + """ + :param name: The name of the function to call. + :param type: For function calling, the type is always `function`. +""" + name: Required[str] + + type: Required[Literal["function"]] + + +OpenAIToolChoice: TypeAlias = Union[ + OpenAIToolChoiceOptions, + OpenAIToolChoiceAllowedParam, + OpenAIToolChoiceTypesParam, + OpenAIToolChoiceFunctionParam, + OpenAIToolChoiceMcpParam, + OpenAIToolChoiceCustomParam, +] + +OpenAIReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] + + + +class OpenAIReasoning(TypedDict, total=False): + """ + :param effort: Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + + Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning + effort. + :param generate_summary: **Deprecated:** use `summary` instead. + + A summary of the reasoning performed by the model. This can be useful for + debugging and understanding the model's reasoning process. One of `auto`, + `concise`, or `detailed`. + :param summary: A summary of the reasoning performed by the model. + + This can be useful for debugging and understanding the model's reasoning + process. One of `auto`, `concise`, or `detailed`. +""" + effort: Optional[OpenAIReasoningEffort] + + generate_summary: Optional[Literal["auto", "concise", "detailed"]] + + summary: Optional[Literal["auto", "concise", "detailed"]] + + + + +class OpenAIActionClick(TypedDict, total=False): + """ + :param button: Indicates which mouse button was pressed during the click. + + One of `left`, `right`, `wheel`, `back`, or `forward`. + :param type: Specifies the event type. + + For a click action, this property is always set to `click`. + :param x: The x-coordinate where the click occurred. + :param y: The y-coordinate where the click occurred. +""" + button: Required[Literal["left", "right", "wheel", "back", "forward"]] + + type: Required[Literal["click"]] + + x: Required[int] + + y: Required[int] + + + + +class OpenAIActionKeypress(TypedDict, total=False): + """ + :param keys: The combination of keys the model is requesting to be pressed. + + This is an array of strings, each representing a key. 
+    :param type: Specifies the event type.
+
+    For a keypress action, this property is always set to `keypress`.
+"""
+    keys: Required[Sequence[str]]
+
+    type: Required[Literal["keypress"]]
+
+
+class OpenAIActionDragPath(TypedDict, total=False):
+    """
+    :param x: The x-coordinate.
+    :param y: The y-coordinate.
+"""
+    x: Required[int]
+
+    y: Required[int]
+
+
+class OpenAIActionDrag(TypedDict, total=False):
+    """
+    :param path: An array of coordinates representing the path of the drag action.
+
+    Coordinates will appear as an array of objects, eg
+
+    ```
+    [
+      { x: 100, y: 200 },
+      { x: 200, y: 300 }
+    ]
+    ```
+    :param type: Specifies the event type.
+
+    For a drag action, this property is always set to `drag`.
+"""
+    path: Required[Iterable[OpenAIActionDragPath]]
+
+    type: Required[Literal["drag"]]
+
+
+class OpenAIActionMove(TypedDict, total=False):
+    """
+    :param type: Specifies the event type.
+
+    For a move action, this property is always set to `move`.
+    :param x: The x-coordinate to move to.
+    :param y: The y-coordinate to move to.
+"""
+    type: Required[Literal["move"]]
+
+    x: Required[int]
+
+    y: Required[int]
+
+
+class OpenAIActionWait(TypedDict, total=False):
+    """
+    :param type: Specifies the event type.
+
+    For a wait action, this property is always set to `wait`.
+"""
+    type: Required[Literal["wait"]]
+
+
+class OpenAIActionScroll(TypedDict, total=False):
+    """
+    :param scroll_x: The horizontal scroll distance.
+    :param scroll_y: The vertical scroll distance.
+    :param type: Specifies the event type.
+
+    For a scroll action, this property is always set to `scroll`.
+    :param x: The x-coordinate where the scroll occurred.
+    :param y: The y-coordinate where the scroll occurred.
+"""
+    scroll_x: Required[int]
+
+    scroll_y: Required[int]
+
+    type: Required[Literal["scroll"]]
+
+    x: Required[int]
+
+    y: Required[int]
+
+
+class OpenAIActionScreenshot(TypedDict, total=False):
+    """
+    :param type: Specifies the event type.
+
+    For a screenshot action, this property is always set to `screenshot`.
+"""
+    type: Required[Literal["screenshot"]]
+
+
+class OpenAIActionType(TypedDict, total=False):
+    """
+    :param text: The text to type.
+    :param type: Specifies the event type.
+
+    For a type action, this property is always set to `type`.
+"""
+    text: Required[str]
+
+    type: Required[Literal["type"]]
+
+
+class OpenAIActionDoubleClick(TypedDict, total=False):
+    """
+    :param type: Specifies the event type.
+
+    For a double click action, this property is always set to `double_click`.
+    :param x: The x-coordinate where the double click occurred.
+    :param y: The y-coordinate where the double click occurred.
+"""
+    type: Required[Literal["double_click"]]
+
+    x: Required[int]
+
+    y: Required[int]
+
+
+OpenAIAction: TypeAlias = Union[
+    OpenAIActionClick,
+    OpenAIActionDoubleClick,
+    OpenAIActionDrag,
+    OpenAIActionKeypress,
+    OpenAIActionMove,
+    OpenAIActionScreenshot,
+    OpenAIActionScroll,
+    OpenAIActionType,
+    OpenAIActionWait,
+]
+
+
+class OpenAIPendingSafetyCheck(TypedDict, total=False):
+    """
+    :param id: The ID of the pending safety check.
+    :param code: The type of the pending safety check.
+    :param message: Details about the pending safety check.
+"""
+    id: Required[str]
+
+    code: Required[str]
+
+    message: Required[str]
+
+
+class OpenAIResponseComputerToolCallParam(TypedDict, total=False):
+    """
+    :param id: The unique ID of the computer call.
+    :param action: The action to perform, such as a click, drag, keypress, move,
+    screenshot, scroll, type, or wait.
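+    An illustrative click action: `{"type": "click", "button": "left", "x": 100,
+    "y": 200}` (placeholder coordinates).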
+    :param call_id: An identifier used when responding to the tool call with output.
+    :param pending_safety_checks: The pending safety checks for the computer call.
+    :param status: The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    :param type: The type of the computer call. Always `computer_call`.
+"""
+    id: Required[str]
+
+    action: Required[OpenAIAction]
+
+    call_id: Required[str]
+
+    pending_safety_checks: Required[Iterable[OpenAIPendingSafetyCheck]]
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+
+    type: Required[Literal["computer_call"]]
+
+
+class OpenAIItemReference(TypedDict, total=False):
+    """
+    :param id: The ID of the item to reference.
+    :param type: The type of item to reference. Always `item_reference`.
+"""
+    id: Required[str]
+
+    type: Optional[Literal["item_reference"]]
+
+
+class OpenAIImageGenerationCall(TypedDict, total=False):
+    """
+    :param id: The unique ID of the image generation call.
+    :param result: The generated image encoded in base64.
+    :param status: The status of the image generation call.
+    :param type: The type of the image generation call. Always `image_generation_call`.
+"""
+    id: Required[str]
+
+    result: Required[Optional[str]]
+
+    status: Required[Literal["in_progress", "completed", "generating", "failed"]]
+
+    type: Required[Literal["image_generation_call"]]
+
+
+OpenAIOutputOutputContentList: TypeAlias = Union[OpenAIResponseInputTextParam, OpenAIResponseInputImageParam, OpenAIResponseInputFileParam]
+
+
+class OpenAIResponseCustomToolCallOutputParam(TypedDict, total=False):
+    """
+    :param call_id: The call ID, used to map this custom tool call output to a custom tool call.
+    :param output: The output from the custom tool call generated by your code. Can be a string or
+    a list of output content.
+    :param type: The type of the custom tool call output. Always `custom_tool_call_output`.
+    :param id: The unique ID of the custom tool call output in the OpenAI platform.
+"""
+    call_id: Required[str]
+
+    output: Required[Union[str, Iterable[OpenAIOutputOutputContentList]]]
+
+    type: Required[Literal["custom_tool_call_output"]]
+
+    id: str
+
+
+class OpenAIActionOpenPage(TypedDict, total=False):
+    """
+    :param type: The action type.
+    :param url: The URL opened by the model.
+"""
+    type: Required[Literal["open_page"]]
+
+    url: Required[str]
+
+
+class OpenAIActionSearchSource(TypedDict, total=False):
+    """
+    :param type: The type of source. Always `url`.
+    :param url: The URL of the source.
+"""
+    type: Required[Literal["url"]]
+
+    url: Required[str]
+
+
+class OpenAIActionSearch(TypedDict, total=False):
+    """
+    :param query: The search query.
+    :param type: The action type.
+    :param sources: The sources used in the search.
+"""
+    query: Required[str]
+
+    type: Required[Literal["search"]]
+
+    sources: Iterable[OpenAIActionSearchSource]
+
+
+class OpenAIActionFind(TypedDict, total=False):
+    """
+    :param pattern: The pattern or text to search for within the page.
+    :param type: The action type.
+    :param url: The URL of the page searched for the pattern.
+"""
+    pattern: Required[str]
+
+    type: Required[Literal["find"]]
+
+    url: Required[str]
+
+
+OpenAIAction: TypeAlias = Union[OpenAIActionSearch, OpenAIActionOpenPage, OpenAIActionFind]
+
+
+class OpenAIResponseFunctionWebSearchParam(TypedDict, total=False):
+    """
+    :param id: The unique ID of the web search tool call.
+ :param action: An object describing the specific action taken in this web search call. Includes + details on how the model used the web (search, open_page, find). + :param status: The status of the web search tool call. + :param type: The type of the web search tool call. Always `web_search_call`. +""" + id: Required[str] + + action: Required[OpenAIAction] + + status: Required[Literal["in_progress", "searching", "completed", "failed"]] + + type: Required[Literal["web_search_call"]] + + + + +class OpenAIResponseInputTextContentParam(TypedDict, total=False): + """ + :param text: The text input to the model. + :param type: The type of the input item. Always `input_text`. +""" + text: Required[str] + + type: Required[Literal["input_text"]] + + + + +class OpenAIResponseInputFileContentParam(TypedDict, total=False): + """ + :param type: The type of the input item. Always `input_file`. + :param file_data: The base64-encoded data of the file to be sent to the model. + :param file_id: The ID of the file to be sent to the model. + :param file_url: The URL of the file to be sent to the model. + :param filename: The name of the file to be sent to the model. +""" + type: Required[Literal["input_file"]] + + file_data: Optional[str] + + file_id: Optional[str] + + file_url: Optional[str] + + filename: Optional[str] + + + + +class OpenAIResponseInputImageContentParam(TypedDict, total=False): + """ + :param type: The type of the input item. Always `input_image`. + :param detail: The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + :param file_id: The ID of the file to be sent to the model. + :param image_url: The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. +""" + type: Required[Literal["input_image"]] + + detail: Optional[Literal["low", "high", "auto"]] + + file_id: Optional[str] + + image_url: Optional[str] + + +OpenAIResponseFunctionCallOutputItemParam: TypeAlias = Union[ + OpenAIResponseInputTextContentParam, OpenAIResponseInputImageContentParam, OpenAIResponseInputFileContentParam +] + +OpenAIResponseFunctionCallOutputItemListParam: TypeAlias = List[OpenAIResponseFunctionCallOutputItemParam] + + + +class OpenAIFunctionCallOutput(TypedDict, total=False): + """ + :param call_id: The unique ID of the function tool call generated by the model. + :param output: Text, image, or file output of the function tool call. + :param type: The type of the function tool call output. Always `function_call_output`. + :param id: The unique ID of the function tool call output. + + Populated when this item is returned via API. + :param status: The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. +""" + call_id: Required[str] + + output: Required[Union[str, OpenAIResponseFunctionCallOutputItemListParam]] + + type: Required[Literal["function_call_output"]] + + id: Optional[str] + + status: Optional[Literal["in_progress", "completed", "incomplete"]] + + + + +class OpenAIResult(TypedDict, total=False): + """ + :param attributes: Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + :param file_id: The unique ID of the file. 
+    :param filename: The name of the file.
+    :param score: The relevance score of the file - a value between 0 and 1.
+    :param text: The text that was retrieved from the file.
+"""
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+
+    file_id: str
+
+    filename: str
+
+    score: float
+
+    text: str
+
+
+class OpenAIResponseFileSearchToolCallParam(TypedDict, total=False):
+    """
+    :param id: The unique ID of the file search tool call.
+    :param queries: The queries used to search for files.
+    :param status: The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    :param type: The type of the file search tool call. Always `file_search_call`.
+    :param results: The results of the file search tool call.
+"""
+    id: Required[str]
+
+    queries: Required[Sequence[str]]
+
+    status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
+
+    type: Required[Literal["file_search_call"]]
+
+    results: Optional[Iterable[OpenAIResult]]
+
+
+class OpenAIMcpListToolsTool(TypedDict, total=False):
+    """
+    :param input_schema: The JSON schema describing the tool's input.
+    :param name: The name of the tool.
+    :param annotations: Additional annotations about the tool.
+    :param description: The description of the tool.
+"""
+    input_schema: Required[object]
+
+    name: Required[str]
+
+    annotations: Optional[object]
+
+    description: Optional[str]
+
+
+class OpenAIMcpListTools(TypedDict, total=False):
+    """
+    :param id: The unique ID of the list.
+    :param server_label: The label of the MCP server.
+    :param tools: The tools available on the server.
+    :param type: The type of the item. Always `mcp_list_tools`.
+    :param error: Error message if the server could not list tools.
+"""
+    id: Required[str]
+
+    server_label: Required[str]
+
+    tools: Required[Iterable[OpenAIMcpListToolsTool]]
+
+    type: Required[Literal["mcp_list_tools"]]
+
+    error: Optional[str]
+
+
+class OpenAIComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
+    """
+    :param id: The ID of the pending safety check.
+    :param code: The type of the pending safety check.
+    :param message: Details about the pending safety check.
+"""
+    id: Required[str]
+
+    code: Optional[str]
+
+    message: Optional[str]
+
+
+class OpenAIResponseComputerToolCallOutputScreenshotParam(TypedDict, total=False):
+    """
+    :param type: Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    :param file_id: The identifier of an uploaded file that contains the screenshot.
+    :param image_url: The URL of the screenshot image.
+"""
+    type: Required[Literal["computer_screenshot"]]
+
+    file_id: str
+
+    image_url: str
+
+
+class OpenAIComputerCallOutput(TypedDict, total=False):
+    """
+    :param call_id: The ID of the computer tool call that produced the output.
+    :param output: A computer screenshot image used with the computer use tool.
+    :param type: The type of the computer tool call output. Always `computer_call_output`.
+    :param id: The ID of the computer tool call output.
+    :param acknowledged_safety_checks: The safety checks reported by the API that have been acknowledged by the
+    developer.
+    :param status: The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
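+
+    An illustrative `computer_call_output` input item (the ID and URL are
+    placeholders):
+
+    ```json
+    {
+      "type": "computer_call_output",
+      "call_id": "call_abc123",
+      "output": {
+        "type": "computer_screenshot",
+        "image_url": "https://example.com/screenshot.png"
+      }
+    }
+    ```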
+""" + call_id: Required[str] + + output: Required[OpenAIResponseComputerToolCallOutputScreenshotParam] + + type: Required[Literal["computer_call_output"]] + + id: Optional[str] + + acknowledged_safety_checks: Optional[Iterable[OpenAIComputerCallOutputAcknowledgedSafetyCheck]] + + status: Optional[Literal["in_progress", "completed", "incomplete"]] + + + + +class OpenAILogprobTopLogprob(TypedDict, total=False): + token: Required[str] + + bytes: Required[Iterable[int]] + + logprob: Required[float] + + + + +class OpenAILogprob(TypedDict, total=False): + token: Required[str] + + bytes: Required[Iterable[int]] + + logprob: Required[float] + + top_logprobs: Required[Iterable[OpenAILogprobTopLogprob]] + + + + +class OpenAIAnnotationFileCitation(TypedDict, total=False): + """ + :param file_id: The ID of the file. + :param filename: The filename of the file cited. + :param index: The index of the file in the list of files. + :param type: The type of the file citation. Always `file_citation`. +""" + file_id: Required[str] + + filename: Required[str] + + index: Required[int] + + type: Required[Literal["file_citation"]] + + + + +class OpenAIAnnotationURLCitation(TypedDict, total=False): + """ + :param end_index: The index of the last character of the URL citation in the message. + :param start_index: The index of the first character of the URL citation in the message. + :param title: The title of the web resource. + :param type: The type of the URL citation. Always `url_citation`. + :param url: The URL of the web resource. +""" + end_index: Required[int] + + start_index: Required[int] + + title: Required[str] + + type: Required[Literal["url_citation"]] + + url: Required[str] + + + + +class OpenAIAnnotationFilePath(TypedDict, total=False): + """ + :param file_id: The ID of the file. + :param index: The index of the file in the list of files. + :param type: The type of the file path. Always `file_path`. +""" + file_id: Required[str] + + index: Required[int] + + type: Required[Literal["file_path"]] + + + + +class OpenAIAnnotationContainerFileCitation(TypedDict, total=False): + """ + :param container_id: The ID of the container file. + :param end_index: The index of the last character of the container file citation in the message. + :param file_id: The ID of the file. + :param filename: The filename of the container file cited. + :param start_index: The index of the first character of the container file citation in the message. + :param type: The type of the container file citation. Always `container_file_citation`. +""" + container_id: Required[str] + + end_index: Required[int] + + file_id: Required[str] + + filename: Required[str] + + start_index: Required[int] + + type: Required[Literal["container_file_citation"]] + + +OpenAIAnnotation: TypeAlias = Union[ + OpenAIAnnotationFileCitation, OpenAIAnnotationURLCitation, OpenAIAnnotationContainerFileCitation, OpenAIAnnotationFilePath +] + + + +class OpenAIResponseOutputTextParam(TypedDict, total=False): + """ + :param annotations: The annotations of the text output. + :param text: The text output from the model. + :param type: The type of the output text. Always `output_text`. +""" + annotations: Required[Iterable[OpenAIAnnotation]] + + text: Required[str] + + type: Required[Literal["output_text"]] + + logprobs: Iterable[OpenAILogprob] + + + + +class OpenAIResponseOutputRefusalParam(TypedDict, total=False): + """ + :param refusal: The refusal explanation from the model. + :param type: The type of the refusal. Always `refusal`. 
+""" + refusal: Required[str] + + type: Required[Literal["refusal"]] + + +OpenAIContent: TypeAlias = Union[OpenAIResponseOutputTextParam, OpenAIResponseOutputRefusalParam] + + + +class OpenAIResponseOutputMessageParam(TypedDict, total=False): + """ + :param id: The unique ID of the output message. + :param content: The content of the output message. + :param role: The role of the output message. Always `assistant`. + :param status: The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + :param type: The type of the output message. Always `message`. +""" + id: Required[str] + + content: Required[Iterable[OpenAIContent]] + + role: Required[Literal["assistant"]] + + status: Required[Literal["in_progress", "completed", "incomplete"]] + + type: Required[Literal["message"]] + + + + +class OpenAIInputAudio(TypedDict, total=False): + """ + :param data: Base64-encoded audio data. + :param format: The format of the audio data. Currently supported formats are `mp3` and `wav`. +""" + data: Required[str] + + format: Required[Literal["mp3", "wav"]] + + + + +class OpenAIResponseInputAudioParam(TypedDict, total=False): + """ + :param type: The type of the input item. Always `input_audio`. +""" + input_audio: Required[OpenAIInputAudio] + + type: Required[Literal["input_audio"]] + + +OpenAIResponseInputContentParam: TypeAlias = Union[ + OpenAIResponseInputTextParam, OpenAIResponseInputImageParam, OpenAIResponseInputFileParam, OpenAIResponseInputAudioParam +] + +OpenAIResponseInputMessageContentListParam: TypeAlias = List[OpenAIResponseInputContentParam] + + + +class OpenAIEasyInputMessageParam(TypedDict, total=False): + """ + :param content: Text, image, or audio input to the model, used to generate a response. Can also + contain previous assistant responses. + :param role: The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + :param type: The type of the message input. Always `message`. +""" + content: Required[Union[str, OpenAIResponseInputMessageContentListParam]] + + role: Required[Literal["user", "assistant", "system", "developer"]] + + type: Literal["message"] + + + + +class OpenAILocalShellCallOutput(TypedDict, total=False): + """ + :param id: The unique ID of the local shell tool call generated by the model. + :param output: A JSON string of the output of the local shell tool call. + :param type: The type of the local shell tool call output. Always `local_shell_call_output`. + :param status: The status of the item. One of `in_progress`, `completed`, or `incomplete`. +""" + id: Required[str] + + output: Required[str] + + type: Required[Literal["local_shell_call_output"]] + + status: Optional[Literal["in_progress", "completed", "incomplete"]] + + + + +class OpenAIMcpCall(TypedDict, total=False): + """ + :param id: The unique ID of the tool call. + :param arguments: A JSON string of the arguments passed to the tool. + :param name: The name of the tool that was run. + :param server_label: The label of the MCP server running the tool. + :param type: The type of the item. Always `mcp_call`. + :param approval_request_id: Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + :param error: The error from the tool call, if any. + :param output: The output from the tool call. + :param status: The status of the tool call. 
+ + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. +""" + id: Required[str] + + arguments: Required[str] + + name: Required[str] + + server_label: Required[str] + + type: Required[Literal["mcp_call"]] + + approval_request_id: Optional[str] + + error: Optional[str] + + output: Optional[str] + + status: Literal["in_progress", "completed", "incomplete", "calling", "failed"] + + + + +class OpenAIResponseCustomToolCallParam(TypedDict, total=False): + """ + :param call_id: An identifier used to map this custom tool call to a tool call output. + :param input: The input for the custom tool call generated by the model. + :param name: The name of the custom tool being called. + :param type: The type of the custom tool call. Always `custom_tool_call`. + :param id: The unique ID of the custom tool call in the OpenAI platform. +""" + call_id: Required[str] + + input: Required[str] + + name: Required[str] + + type: Required[Literal["custom_tool_call"]] + + id: str + + + + +class OpenAIOutputLogs(TypedDict, total=False): + """ + :param logs: The logs output from the code interpreter. + :param type: The type of the output. Always 'logs'. +""" + logs: Required[str] + + type: Required[Literal["logs"]] + + + + +class OpenAIOutputImage(TypedDict, total=False): + """ + :param type: The type of the output. Always 'image'. + :param url: The URL of the image output from the code interpreter. +""" + type: Required[Literal["image"]] + + url: Required[str] + + +OpenAIOutput: TypeAlias = Union[OpenAIOutputLogs, OpenAIOutputImage] + + + +class OpenAIResponseCodeInterpreterToolCallParam(TypedDict, total=False): + """ + :param id: The unique ID of the code interpreter tool call. + :param code: The code to run, or null if not available. + :param container_id: The ID of the container used to run the code. + :param outputs: The outputs generated by the code interpreter, such as logs or images. Can be + null if no outputs are available. + :param status: The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. + :param type: The type of the code interpreter tool call. Always `code_interpreter_call`. +""" + id: Required[str] + + code: Required[Optional[str]] + + container_id: Required[str] + + outputs: Required[Optional[Iterable[OpenAIOutput]]] + + status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]] + + type: Required[Literal["code_interpreter_call"]] + + + + +class OpenAISummary(TypedDict, total=False): + """ + :param text: A summary of the reasoning output from the model so far. + :param type: The type of the object. Always `summary_text`. +""" + text: Required[str] + + type: Required[Literal["summary_text"]] + + + + +class OpenAIContent(TypedDict, total=False): + """ + :param text: The reasoning text from the model. + :param type: The type of the reasoning text. Always `reasoning_text`. +""" + text: Required[str] + + type: Required[Literal["reasoning_text"]] + + + + +class OpenAIResponseReasoningItemParam(TypedDict, total=False): + """ + :param id: The unique identifier of the reasoning content. + :param summary: Reasoning summary content. + :param type: The type of the object. Always `reasoning`. + :param content: Reasoning text content. + :param encrypted_content: The encrypted content of the reasoning item - populated when a response is + generated with `reasoning.encrypted_content` in the `include` parameter. + :param status: The status of the item. 
+ + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. +""" + id: Required[str] + + summary: Required[Iterable[OpenAISummary]] + + type: Required[Literal["reasoning"]] + + content: Iterable[OpenAIContent] + + encrypted_content: Optional[str] + + status: Literal["in_progress", "completed", "incomplete"] + + + + +class OpenAIMcpApprovalResponse(TypedDict, total=False): + """ + :param approval_request_id: The ID of the approval request being answered. + :param approve: Whether the request was approved. + :param type: The type of the item. Always `mcp_approval_response`. + :param id: The unique ID of the approval response + :param reason: Optional reason for the decision. +""" + approval_request_id: Required[str] + + approve: Required[bool] + + type: Required[Literal["mcp_approval_response"]] + + id: Optional[str] + + reason: Optional[str] + + + + +class OpenAIMcpApprovalRequest(TypedDict, total=False): + """ + :param id: The unique ID of the approval request. + :param arguments: A JSON string of arguments for the tool. + :param name: The name of the tool to run. + :param server_label: The label of the MCP server making the request. + :param type: The type of the item. Always `mcp_approval_request`. +""" + id: Required[str] + + arguments: Required[str] + + name: Required[str] + + server_label: Required[str] + + type: Required[Literal["mcp_approval_request"]] + + + + +class OpenAILocalShellCallAction(TypedDict, total=False): + """ + :param command: The command to run. + :param env: Environment variables to set for the command. + :param type: The type of the local shell action. Always `exec`. + :param timeout_ms: Optional timeout in milliseconds for the command. + :param user: Optional user to run the command as. + :param working_directory: Optional working directory to run the command in. +""" + command: Required[Sequence[str]] + + env: Required[Dict[str, str]] + + type: Required[Literal["exec"]] + + timeout_ms: Optional[int] + + user: Optional[str] + + working_directory: Optional[str] + + + + +class OpenAILocalShellCall(TypedDict, total=False): + """ + :param id: The unique ID of the local shell call. + :param action: Execute a shell command on the server. + :param call_id: The unique ID of the local shell tool call generated by the model. + :param status: The status of the local shell call. + :param type: The type of the local shell call. Always `local_shell_call`. +""" + id: Required[str] + + action: Required[OpenAILocalShellCallAction] + + call_id: Required[str] + + status: Required[Literal["in_progress", "completed", "incomplete"]] + + type: Required[Literal["local_shell_call"]] + + + + +class OpenAIResponseFunctionToolCallParam(TypedDict, total=False): + """ + :param arguments: A JSON string of the arguments to pass to the function. + :param call_id: The unique ID of the function tool call generated by the model. + :param name: The name of the function to run. + :param type: The type of the function tool call. Always `function_call`. + :param id: The unique ID of the function tool call. + :param status: The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. 
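+
+    An illustrative `function_call` item (the name, ID, and arguments are
+    placeholders):
+
+    ```json
+    {
+      "type": "function_call",
+      "call_id": "call_abc123",
+      "name": "get_weather",
+      "arguments": "{\"city\": \"Paris\"}"
+    }
+    ```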
+""" + arguments: Required[str] + + call_id: Required[str] + + name: Required[str] + + type: Required[Literal["function_call"]] + + id: str + + status: Literal["in_progress", "completed", "incomplete"] + + + + +class OpenAIMessage(TypedDict, total=False): + """ + :param content: A list of one or many input items to the model, containing different content + types. + :param role: The role of the message input. One of `user`, `system`, or `developer`. + :param status: The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + :param type: The type of the message input. Always set to `message`. +""" + content: Required[OpenAIResponseInputMessageContentListParam] + + role: Required[Literal["user", "system", "developer"]] + + status: Literal["in_progress", "completed", "incomplete"] + + type: Literal["message"] + + +OpenAIResponseInputItemParam: TypeAlias = Union[ + OpenAIEasyInputMessageParam, + OpenAIMessage, + OpenAIResponseOutputMessageParam, + OpenAIResponseFileSearchToolCallParam, + OpenAIResponseComputerToolCallParam, + OpenAIComputerCallOutput, + OpenAIResponseFunctionWebSearchParam, + OpenAIResponseFunctionToolCallParam, + OpenAIFunctionCallOutput, + OpenAIResponseReasoningItemParam, + OpenAIImageGenerationCall, + OpenAIResponseCodeInterpreterToolCallParam, + OpenAILocalShellCall, + OpenAILocalShellCallOutput, + OpenAIMcpListTools, + OpenAIMcpApprovalRequest, + OpenAIMcpApprovalResponse, + OpenAIMcpCall, + OpenAIResponseCustomToolCallOutputParam, + OpenAIResponseCustomToolCallParam, + OpenAIItemReference, +] + +OpenAIResponseInputParam: TypeAlias = List[OpenAIResponseInputItemParam] + + + +class OpenAIResponseCreateParamsBase(TypedDict, total=False): + """ + :param background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + :param conversation: The conversation that this response belongs to. + + Items from this conversation are prepended to `input_items` for this response + request. Input items and output items from this response are automatically added + to this conversation after this response completes. + :param include: Specify additional output data to include in the model response. + + Currently supported values are: + + - `web_search_call.action.sources`: Include the sources of the web search tool + call. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). + :param input: Text, image, or file inputs to the model, used to generate a response. 
+ + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + :param instructions: A system (or developer) message inserted into the model's context. + + When using along with `previous_response_id`, the instructions from a previous + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. + :param max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + :param max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a + response. This maximum number applies across all built-in tool calls, not per + individual tool. Any further attempts to call a tool by the model will be + ignored. + :param metadata: Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + :param model: Model ID used to generate the response, like `gpt-4o` or `o3`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + :param parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + :param previous_response_id: The unique ID of the previous response to the model. + + Use this to create multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + :param prompt: Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + :param prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + :param reasoning: **gpt-5 and o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + :param safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + :param service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. 
+ - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. + - When not set, the default behavior is 'auto'. + + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. + :param store: Whether to store the generated model response for later retrieval via API. + :param stream_options: Options for streaming responses. Only set this when you set `stream: true`. + :param temperature: What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + :param text: Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + :param tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + :param tools: An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + We support the following categories of tools: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code with strongly typed arguments and outputs. + Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. + :param top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + :param truncation: The truncation strategy to use for the model response. + + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. 
+ - `disabled` (default): If the input size will exceed the context window size + for a model, the request will fail with a 400 error. + :param user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. + + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). +""" + background: Optional[bool] + + conversation: Optional[OpenAIConversation] + + include: Optional[List[OpenAIResponseIncludable]] + + input: Union[str, OpenAIResponseInputParam] + + instructions: Optional[str] + + max_output_tokens: Optional[int] + + max_tool_calls: Optional[int] + + metadata: Optional[OpenAIMetadata] + + model: OpenAIResponsesModel + + parallel_tool_calls: Optional[bool] + + previous_response_id: Optional[str] + + prompt: Optional[OpenAIResponsePromptParam] + + prompt_cache_key: str + + reasoning: Optional[OpenAIReasoning] + + safety_identifier: str + + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] + + store: Optional[bool] + + stream_options: Optional[OpenAIStreamOptions] + + temperature: Optional[float] + + text: OpenAIResponseTextConfigParam + + tool_choice: OpenAIToolChoice + + tools: Iterable[OpenAIToolParam] + + top_logprobs: Optional[int] + + top_p: Optional[float] + + truncation: Optional[Literal["auto", "disabled"]] + + user: str + + + + +@json_schema_type +class OpenAIResponseCreateParamsNonStreaming(OpenAIResponseCreateParamsBase, total=False): + """ + :param stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. +""" + stream: Optional[Literal[False]] + + + + +@json_schema_type +class OpenAIResponseCreateParamsStreaming(OpenAIResponseCreateParamsBase): + """ + :param stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + for more information. +""" + stream: Required[Literal[True]] + + +OpenAIResponseCreateParams = Union[OpenAIResponseCreateParamsNonStreaming, OpenAIResponseCreateParamsStreaming] + +OpenAIMetadata: TypeAlias = Dict[str, str] + + + +@json_schema_type +class OpenAIConversation(BaseModel): + """ + :param id: The unique ID of the conversation. +""" + id: str + + + + +@json_schema_type +class OpenAIResult(BaseModel): + """ + :param attributes: Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + :param file_id: The unique ID of the file. + :param filename: The name of the file. + :param score: The relevance score of the file - a value between 0 and 1. 
+    :param text: The text that was retrieved from the file.
+"""
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+
+    file_id: Optional[str] = None
+
+    filename: Optional[str] = None
+
+    score: Optional[float] = None
+
+    text: Optional[str] = None
+
+
+
+
+@json_schema_type
+class OpenAIResponseFileSearchToolCall(BaseModel):
+    """
+    :param id: The unique ID of the file search tool call.
+    :param queries: The queries used to search for files.
+    :param status: The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    :param type: The type of the file search tool call. Always `file_search_call`.
+    :param results: The results of the file search tool call.
+"""
+    id: str
+
+    queries: List[str]
+
+    status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
+
+    type: Literal["file_search_call"]
+
+    results: Optional[List[OpenAIResult]] = None
+
+
+
+
+@json_schema_type
+class OpenAIItemReference(BaseModel):
+    """
+    :param id: The ID of the item to reference.
+    :param type: The type of item to reference. Always `item_reference`.
+"""
+    id: str
+
+    type: Optional[Literal["item_reference"]] = None
+
+
+
+
+@json_schema_type
+class OpenAIActionClick(BaseModel):
+    """
+    :param button: Indicates which mouse button was pressed during the click.
+
+    One of `left`, `right`, `wheel`, `back`, or `forward`.
+    :param type: Specifies the event type.
+
+    For a click action, this property is always set to `click`.
+    :param x: The x-coordinate where the click occurred.
+    :param y: The y-coordinate where the click occurred.
+"""
+    button: Literal["left", "right", "wheel", "back", "forward"]
+
+    type: Literal["click"]
+
+    x: int
+
+    y: int
+
+
+
+
+@json_schema_type
+class OpenAIActionKeypress(BaseModel):
+    """
+    :param keys: The combination of keys the model is requesting to be pressed.
+
+    This is an array of strings, each representing a key.
+    :param type: Specifies the event type.
+
+    For a keypress action, this property is always set to `keypress`.
+"""
+    keys: List[str]
+
+    type: Literal["keypress"]
+
+
+
+
+@json_schema_type
+class OpenAIActionDragPath(BaseModel):
+    """
+    :param x: The x-coordinate.
+    :param y: The y-coordinate.
+"""
+    x: int
+
+    y: int
+
+
+
+
+@json_schema_type
+class OpenAIActionDrag(BaseModel):
+    """
+    :param path: An array of coordinates representing the path of the drag action.
+
+    Coordinates will appear as an array of objects, e.g.
+
+    ```
+    [
+      { x: 100, y: 200 },
+      { x: 200, y: 300 }
+    ]
+    ```
+    :param type: Specifies the event type.
+
+    For a drag action, this property is always set to `drag`.
+"""
+    path: List[OpenAIActionDragPath]
+
+    type: Literal["drag"]
+
+
+
+
+@json_schema_type
+class OpenAIActionMove(BaseModel):
+    """
+    :param type: Specifies the event type.
+
+    For a move action, this property is always set to `move`.
+    :param x: The x-coordinate to move to.
+    :param y: The y-coordinate to move to.
+"""
+    type: Literal["move"]
+
+    x: int
+
+    y: int
+
+
+
+
+@json_schema_type
+class OpenAIActionWait(BaseModel):
+    """
+    :param type: Specifies the event type.
+
+    For a wait action, this property is always set to `wait`.
+"""
+    type: Literal["wait"]
+
+
+
+
+@json_schema_type
+class OpenAIActionScroll(BaseModel):
+    """
+    :param scroll_x: The horizontal scroll distance.
+    :param scroll_y: The vertical scroll distance.
+    :param type: Specifies the event type.
+
+    For a scroll action, this property is always set to `scroll`.
+    :param x: The x-coordinate where the scroll occurred.
+ :param y: The y-coordinate where the scroll occurred. +""" + scroll_x: int + + scroll_y: int + + type: Literal["scroll"] + + x: int + + y: int + + + + +@json_schema_type +class OpenAIActionScreenshot(BaseModel): + """ + :param type: Specifies the event type. + + For a screenshot action, this property is always set to `screenshot`. +""" + type: Literal["screenshot"] + + + + +@json_schema_type +class OpenAIActionType(BaseModel): + """ + :param text: The text to type. + :param type: Specifies the event type. + + For a type action, this property is always set to `type`. +""" + text: str + + type: Literal["type"] + + + + +@json_schema_type +class OpenAIActionDoubleClick(BaseModel): + """ + :param type: Specifies the event type. + + For a double click action, this property is always set to `double_click`. + :param x: The x-coordinate where the double click occurred. + :param y: The y-coordinate where the double click occurred. +""" + type: Literal["double_click"] + + x: int + + y: int + + +OpenAIAction: TypeAlias = Annotated[ + Union[ + OpenAIActionClick, + OpenAIActionDoubleClick, + OpenAIActionDrag, + OpenAIActionKeypress, + OpenAIActionMove, + OpenAIActionScreenshot, + OpenAIActionScroll, + OpenAIActionType, + OpenAIActionWait, + ], + Field(discriminator="type"), +] + + + +@json_schema_type +class OpenAIPendingSafetyCheck(BaseModel): + """ + :param id: The ID of the pending safety check. + :param code: The type of the pending safety check. + :param message: Details about the pending safety check. +""" + id: str + + code: str + + message: str + + + + +@json_schema_type +class OpenAIResponseComputerToolCall(BaseModel): + """ + :param id: The unique ID of the computer call. + :param action: A click action. + :param call_id: An identifier used when responding to the tool call with output. + :param pending_safety_checks: The pending safety checks for the computer call. + :param status: The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + :param type: The type of the computer call. Always `computer_call`. +""" + id: str + + action: OpenAIAction + + call_id: str + + pending_safety_checks: List[OpenAIPendingSafetyCheck] + + status: Literal["in_progress", "completed", "incomplete"] + + type: Literal["computer_call"] + + + + +@json_schema_type +class OpenAIResponseInputFile(BaseModel): + """ + :param type: The type of the input item. Always `input_file`. + :param file_data: The content of the file to be sent to the model. + :param file_id: The ID of the file to be sent to the model. + :param file_url: The URL of the file to be sent to the model. + :param filename: The name of the file to be sent to the model. +""" + type: Literal["input_file"] + + file_data: Optional[str] = None + + file_id: Optional[str] = None + + file_url: Optional[str] = None + + filename: Optional[str] = None + + + + +@json_schema_type +class OpenAIResponseInputImage(BaseModel): + """ + :param detail: The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + :param type: The type of the input item. Always `input_image`. + :param file_id: The ID of the file to be sent to the model. + :param image_url: The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. 
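+
+    Example (an illustrative sketch; the image URL is hypothetical):
+
+    >>> image = OpenAIResponseInputImage(
+    ...     detail="auto",
+    ...     type="input_image",
+    ...     image_url="https://example.com/photo.png",
+    ... )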
+""" + detail: Literal["low", "high", "auto"] + + type: Literal["input_image"] + + file_id: Optional[str] = None + + image_url: Optional[str] = None + + + + +@json_schema_type +class OpenAIResponseInputText(BaseModel): + """ + :param text: The text input to the model. + :param type: The type of the input item. Always `input_text`. +""" + text: str + + type: Literal["input_text"] + + +OpenAIOutputOutputContentList: TypeAlias = Annotated[ + Union[OpenAIResponseInputText, OpenAIResponseInputImage, OpenAIResponseInputFile], Field(discriminator="type") +] + + + +@json_schema_type +class OpenAIResponseCustomToolCallOutput(BaseModel): + """ + :param call_id: The call ID, used to map this custom tool call output to a custom tool call. + :param output: The output from the custom tool call generated by your code. Can be a string or + an list of output content. + :param type: The type of the custom tool call output. Always `custom_tool_call_output`. + :param id: The unique ID of the custom tool call output in the OpenAI platform. +""" + call_id: str + + output: Union[str, List[OpenAIOutputOutputContentList]] + + type: Literal["custom_tool_call_output"] + + id: Optional[str] = None + + + + +@json_schema_type +class OpenAIImageGenerationCall(BaseModel): + """ + :param id: The unique ID of the image generation call. + :param result: The generated image encoded in base64. + :param status: The status of the image generation call. + :param type: The type of the image generation call. Always `image_generation_call`. +""" + id: str + + result: Optional[str] = None + + status: Literal["in_progress", "completed", "generating", "failed"] + + type: Literal["image_generation_call"] + + + + +@json_schema_type +class OpenAIActionOpenPage(BaseModel): + """ + :param type: The action type. + :param url: The URL opened by the model. +""" + type: Literal["open_page"] + + url: str + + + + +@json_schema_type +class OpenAIActionSearchSource(BaseModel): + """ + :param type: The type of source. Always `url`. + :param url: The URL of the source. +""" + type: Literal["url"] + + url: str + + + + +@json_schema_type +class OpenAIActionSearch(BaseModel): + """ + :param query: The search query. + :param type: The action type. + :param sources: The sources used in the search. +""" + query: str + + type: Literal["search"] + + sources: Optional[List[OpenAIActionSearchSource]] = None + + + + +@json_schema_type +class OpenAIActionFind(BaseModel): + """ + :param pattern: The pattern or text to search for within the page. + :param type: The action type. + :param url: The URL of the page searched for the pattern. +""" + pattern: str + + type: Literal["find"] + + url: str + + +OpenAIAction: TypeAlias = Annotated[Union[OpenAIActionSearch, OpenAIActionOpenPage, OpenAIActionFind], Field(discriminator="type")] + + + +@json_schema_type +class OpenAIResponseFunctionWebSearch(BaseModel): + """ + :param id: The unique ID of the web search tool call. + :param action: An object describing the specific action taken in this web search call. Includes + details on how the model used the web (search, open_page, find). + :param status: The status of the web search tool call. + :param type: The type of the web search tool call. Always `web_search_call`. +""" + id: str + + action: OpenAIAction + + status: Literal["in_progress", "searching", "completed", "failed"] + + type: Literal["web_search_call"] + + + + +@json_schema_type +class OpenAIResponseInputFileContent(BaseModel): + """ + :param type: The type of the input item. Always `input_file`. 
+ :param file_data: The base64-encoded data of the file to be sent to the model. + :param file_id: The ID of the file to be sent to the model. + :param file_url: The URL of the file to be sent to the model. + :param filename: The name of the file to be sent to the model. +""" + type: Literal["input_file"] + + file_data: Optional[str] = None + + file_id: Optional[str] = None + + file_url: Optional[str] = None + + filename: Optional[str] = None + + + + +@json_schema_type +class OpenAIResponseInputTextContent(BaseModel): + """ + :param text: The text input to the model. + :param type: The type of the input item. Always `input_text`. +""" + text: str + + type: Literal["input_text"] + + + + +@json_schema_type +class OpenAIResponseInputImageContent(BaseModel): + """ + :param type: The type of the input item. Always `input_image`. + :param detail: The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + :param file_id: The ID of the file to be sent to the model. + :param image_url: The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. +""" + type: Literal["input_image"] + + detail: Optional[Literal["low", "high", "auto"]] = None + + file_id: Optional[str] = None + + image_url: Optional[str] = None + + +OpenAIResponseFunctionCallOutputItem: TypeAlias = Annotated[ + Union[OpenAIResponseInputTextContent, OpenAIResponseInputImageContent, OpenAIResponseInputFileContent], + Field(discriminator="type"), +] + +OpenAIResponseFunctionCallOutputItemList: TypeAlias = List[OpenAIResponseFunctionCallOutputItem] + + + +@json_schema_type +class OpenAIFunctionCallOutput(BaseModel): + """ + :param call_id: The unique ID of the function tool call generated by the model. + :param output: Text, image, or file output of the function tool call. + :param type: The type of the function tool call output. Always `function_call_output`. + :param id: The unique ID of the function tool call output. + + Populated when this item is returned via API. + :param status: The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. +""" + call_id: str + + output: Union[str, OpenAIResponseFunctionCallOutputItemList] + + type: Literal["function_call_output"] + + id: Optional[str] = None + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + + + + +@json_schema_type +class OpenAIMcpListToolsTool(BaseModel): + """ + :param input_schema: The JSON schema describing the tool's input. + :param name: The name of the tool. + :param annotations: Additional annotations about the tool. + :param description: The description of the tool. +""" + input_schema: object + + name: str + + annotations: Optional[object] = None + + description: Optional[str] = None + + + + +@json_schema_type +class OpenAIMcpListTools(BaseModel): + """ + :param id: The unique ID of the list. + :param server_label: The label of the MCP server. + :param tools: The tools available on the server. + :param type: The type of the item. Always `mcp_list_tools`. + :param error: Error message if the server could not list tools. +""" + id: str + + server_label: str + + tools: List[OpenAIMcpListToolsTool] + + type: Literal["mcp_list_tools"] + + error: Optional[str] = None + + + + +@json_schema_type +class OpenAIComputerCallOutputAcknowledgedSafetyCheck(BaseModel): + """ + :param id: The ID of the pending safety check. + :param code: The type of the pending safety check. 
+ :param message: Details about the pending safety check. +""" + id: str + + code: Optional[str] = None + + message: Optional[str] = None + + + + +@json_schema_type +class OpenAIResponseComputerToolCallOutputScreenshot(BaseModel): + """ + :param type: Specifies the event type. + + For a computer screenshot, this property is always set to `computer_screenshot`. + :param file_id: The identifier of an uploaded file that contains the screenshot. + :param image_url: The URL of the screenshot image. +""" + type: Literal["computer_screenshot"] + + file_id: Optional[str] = None + + image_url: Optional[str] = None + + + + +@json_schema_type +class OpenAIComputerCallOutput(BaseModel): + """ + :param call_id: The ID of the computer tool call that produced the output. + :param output: A computer screenshot image used with the computer use tool. + :param type: The type of the computer tool call output. Always `computer_call_output`. + :param id: The ID of the computer tool call output. + :param acknowledged_safety_checks: The safety checks reported by the API that have been acknowledged by the + developer. + :param status: The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. +""" + call_id: str + + output: OpenAIResponseComputerToolCallOutputScreenshot + + type: Literal["computer_call_output"] + + id: Optional[str] = None + + acknowledged_safety_checks: Optional[List[OpenAIComputerCallOutputAcknowledgedSafetyCheck]] = None + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + + + + +@json_schema_type +class OpenAILocalShellCallOutput(BaseModel): + """ + :param id: The unique ID of the local shell tool call generated by the model. + :param output: A JSON string of the output of the local shell tool call. + :param type: The type of the local shell tool call output. Always `local_shell_call_output`. + :param status: The status of the item. One of `in_progress`, `completed`, or `incomplete`. +""" + id: str + + output: str + + type: Literal["local_shell_call_output"] + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + + + + +@json_schema_type +class OpenAIMcpCall(BaseModel): + """ + :param id: The unique ID of the tool call. + :param arguments: A JSON string of the arguments passed to the tool. + :param name: The name of the tool that was run. + :param server_label: The label of the MCP server running the tool. + :param type: The type of the item. Always `mcp_call`. + :param approval_request_id: Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + :param error: The error from the tool call, if any. + :param output: The output from the tool call. + :param status: The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. 
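+
+    Example (an illustrative sketch; the IDs, tool name, and server label
+    are hypothetical):
+
+    >>> call = OpenAIMcpCall(
+    ...     id="mcp_123",
+    ...     arguments='{"query": "release notes"}',
+    ...     name="search_docs",
+    ...     server_label="deepwiki",
+    ...     type="mcp_call",
+    ...     status="completed",
+    ... )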
+""" + id: str + + arguments: str + + name: str + + server_label: str + + type: Literal["mcp_call"] + + approval_request_id: Optional[str] = None + + error: Optional[str] = None + + output: Optional[str] = None + + status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None + + + + +@json_schema_type +class OpenAILogprobTopLogprob(BaseModel): + token: str + + bytes: List[int] + + logprob: float + + + + +@json_schema_type +class OpenAILogprob(BaseModel): + token: str + + bytes: List[int] + + logprob: float + + top_logprobs: List[OpenAILogprobTopLogprob] + + + + +@json_schema_type +class OpenAIAnnotationFileCitation(BaseModel): + """ + :param file_id: The ID of the file. + :param filename: The filename of the file cited. + :param index: The index of the file in the list of files. + :param type: The type of the file citation. Always `file_citation`. +""" + file_id: str + + filename: str + + index: int + + type: Literal["file_citation"] + + + + +@json_schema_type +class OpenAIAnnotationURLCitation(BaseModel): + """ + :param end_index: The index of the last character of the URL citation in the message. + :param start_index: The index of the first character of the URL citation in the message. + :param title: The title of the web resource. + :param type: The type of the URL citation. Always `url_citation`. + :param url: The URL of the web resource. +""" + end_index: int + + start_index: int + + title: str + + type: Literal["url_citation"] + + url: str + + + + +@json_schema_type +class OpenAIAnnotationFilePath(BaseModel): + """ + :param file_id: The ID of the file. + :param index: The index of the file in the list of files. + :param type: The type of the file path. Always `file_path`. +""" + file_id: str + + index: int + + type: Literal["file_path"] + + + + +@json_schema_type +class OpenAIAnnotationContainerFileCitation(BaseModel): + """ + :param container_id: The ID of the container file. + :param end_index: The index of the last character of the container file citation in the message. + :param file_id: The ID of the file. + :param filename: The filename of the container file cited. + :param start_index: The index of the first character of the container file citation in the message. + :param type: The type of the container file citation. Always `container_file_citation`. +""" + container_id: str + + end_index: int + + file_id: str + + filename: str + + start_index: int + + type: Literal["container_file_citation"] + + +OpenAIAnnotation: TypeAlias = Annotated[ + Union[OpenAIAnnotationFileCitation, OpenAIAnnotationURLCitation, OpenAIAnnotationContainerFileCitation, OpenAIAnnotationFilePath], + Field(discriminator="type"), +] + + + +@json_schema_type +class OpenAIResponseOutputText(BaseModel): + """ + :param annotations: The annotations of the text output. + :param text: The text output from the model. + :param type: The type of the output text. Always `output_text`. +""" + annotations: List[OpenAIAnnotation] + + text: str + + type: Literal["output_text"] + + logprobs: Optional[List[OpenAILogprob]] = None + + + + +@json_schema_type +class OpenAIResponseOutputRefusal(BaseModel): + """ + :param refusal: The refusal explanation from the model. + :param type: The type of the refusal. Always `refusal`. 
+""" + refusal: str + + type: Literal["refusal"] + + +OpenAIContent: TypeAlias = Annotated[Union[OpenAIResponseOutputText, OpenAIResponseOutputRefusal], Field(discriminator="type")] + + + +@json_schema_type +class OpenAIResponseOutputMessage(BaseModel): + """ + :param id: The unique ID of the output message. + :param content: The content of the output message. + :param role: The role of the output message. Always `assistant`. + :param status: The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + :param type: The type of the output message. Always `message`. +""" + id: str + + content: List[OpenAIContent] + + role: Literal["assistant"] + + status: Literal["in_progress", "completed", "incomplete"] + + type: Literal["message"] + + + + +@json_schema_type +class OpenAIInputAudio(BaseModel): + """ + :param data: Base64-encoded audio data. + :param format: The format of the audio data. Currently supported formats are `mp3` and `wav`. +""" + data: str + + format: Literal["mp3", "wav"] + + + + +@json_schema_type +class OpenAIResponseInputAudio(BaseModel): + """ + :param type: The type of the input item. Always `input_audio`. +""" + input_audio: OpenAIInputAudio + + type: Literal["input_audio"] + + +OpenAIResponseInputContent: TypeAlias = Annotated[ + Union[OpenAIResponseInputText, OpenAIResponseInputImage, OpenAIResponseInputFile, OpenAIResponseInputAudio], + Field(discriminator="type"), +] + +OpenAIResponseInputMessageContentList: TypeAlias = List[OpenAIResponseInputContent] + + + +@json_schema_type +class OpenAIEasyInputMessage(BaseModel): + """ + :param content: Text, image, or audio input to the model, used to generate a response. Can also + contain previous assistant responses. + :param role: The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + :param type: The type of the message input. Always `message`. +""" + content: Union[str, OpenAIResponseInputMessageContentList] + + role: Literal["user", "assistant", "system", "developer"] + + type: Optional[Literal["message"]] = None + + + + +@json_schema_type +class OpenAIMcpApprovalResponse(BaseModel): + """ + :param approval_request_id: The ID of the approval request being answered. + :param approve: Whether the request was approved. + :param type: The type of the item. Always `mcp_approval_response`. + :param id: The unique ID of the approval response + :param reason: Optional reason for the decision. +""" + approval_request_id: str + + approve: bool + + type: Literal["mcp_approval_response"] + + id: Optional[str] = None + + reason: Optional[str] = None + + + + +@json_schema_type +class OpenAIMcpApprovalRequest(BaseModel): + """ + :param id: The unique ID of the approval request. + :param arguments: A JSON string of arguments for the tool. + :param name: The name of the tool to run. + :param server_label: The label of the MCP server making the request. + :param type: The type of the item. Always `mcp_approval_request`. +""" + id: str + + arguments: str + + name: str + + server_label: str + + type: Literal["mcp_approval_request"] + + + + +@json_schema_type +class OpenAILocalShellCallAction(BaseModel): + """ + :param command: The command to run. + :param env: Environment variables to set for the command. + :param type: The type of the local shell action. Always `exec`. + :param timeout_ms: Optional timeout in milliseconds for the command. + :param user: Optional user to run the command as. 
+    :param working_directory: Optional working directory to run the command in.
+"""
+    command: List[str]
+
+    env: Dict[str, str]
+
+    type: Literal["exec"]
+
+    timeout_ms: Optional[int] = None
+
+    user: Optional[str] = None
+
+    working_directory: Optional[str] = None
+
+
+
+
+@json_schema_type
+class OpenAILocalShellCall(BaseModel):
+    """
+    :param id: The unique ID of the local shell call.
+    :param action: Execute a shell command on the server.
+    :param call_id: The unique ID of the local shell tool call generated by the model.
+    :param status: The status of the local shell call.
+    :param type: The type of the local shell call. Always `local_shell_call`.
+"""
+    id: str
+
+    action: OpenAILocalShellCallAction
+
+    call_id: str
+
+    status: Literal["in_progress", "completed", "incomplete"]
+
+    type: Literal["local_shell_call"]
+
+
+
+
+@json_schema_type
+class OpenAIMessage(BaseModel):
+    """
+    :param content: A list of one or many input items to the model, containing different content
+    types.
+    :param role: The role of the message input. One of `user`, `system`, or `developer`.
+    :param status: The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    :param type: The type of the message input. Always set to `message`.
+"""
+    content: OpenAIResponseInputMessageContentList
+
+    role: Literal["user", "system", "developer"]
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+
+    type: Optional[Literal["message"]] = None
+
+
+
+
+@json_schema_type
+class OpenAIResponseCustomToolCall(BaseModel):
+    """
+    :param call_id: An identifier used to map this custom tool call to a tool call output.
+    :param input: The input for the custom tool call generated by the model.
+    :param name: The name of the custom tool being called.
+    :param type: The type of the custom tool call. Always `custom_tool_call`.
+    :param id: The unique ID of the custom tool call in the OpenAI platform.
+"""
+    call_id: str
+
+    input: str
+
+    name: str
+
+    type: Literal["custom_tool_call"]
+
+    id: Optional[str] = None
+
+
+
+
+@json_schema_type
+class OpenAIResponseFunctionToolCall(BaseModel):
+    """
+    :param arguments: A JSON string of the arguments to pass to the function.
+    :param call_id: The unique ID of the function tool call generated by the model.
+    :param name: The name of the function to run.
+    :param type: The type of the function tool call. Always `function_call`.
+    :param id: The unique ID of the function tool call.
+    :param status: The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+"""
+    arguments: str
+
+    call_id: str
+
+    name: str
+
+    type: Literal["function_call"]
+
+    id: Optional[str] = None
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+
+
+
+
+@json_schema_type
+class OpenAIOutputLogs(BaseModel):
+    """
+    :param logs: The logs output from the code interpreter.
+    :param type: The type of the output. Always 'logs'.
+"""
+    logs: str
+
+    type: Literal["logs"]
+
+
+
+
+@json_schema_type
+class OpenAIOutputImage(BaseModel):
+    """
+    :param type: The type of the output. Always 'image'.
+    :param url: The URL of the image output from the code interpreter.
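+
+    Example (an illustrative sketch; the image URL is hypothetical):
+
+    >>> image_output = OpenAIOutputImage(
+    ...     type="image",
+    ...     url="https://example.com/chart.png",
+    ... )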
+""" + type: Literal["image"] + + url: str + + +OpenAIOutput: TypeAlias = Annotated[Union[OpenAIOutputLogs, OpenAIOutputImage], Field(discriminator="type")] + + + +@json_schema_type +class OpenAIResponseCodeInterpreterToolCall(BaseModel): + """ + :param id: The unique ID of the code interpreter tool call. + :param code: The code to run, or null if not available. + :param container_id: The ID of the container used to run the code. + :param outputs: The outputs generated by the code interpreter, such as logs or images. Can be + null if no outputs are available. + :param status: The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. + :param type: The type of the code interpreter tool call. Always `code_interpreter_call`. +""" + id: str + + code: Optional[str] = None + + container_id: str + + outputs: Optional[List[OpenAIOutput]] = None + + status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] + + type: Literal["code_interpreter_call"] + + + + +@json_schema_type +class OpenAIContent(BaseModel): + """ + :param text: The reasoning text from the model. + :param type: The type of the reasoning text. Always `reasoning_text`. +""" + text: str + + type: Literal["reasoning_text"] + + + + +@json_schema_type +class OpenAISummary(BaseModel): + """ + :param text: A summary of the reasoning output from the model so far. + :param type: The type of the object. Always `summary_text`. +""" + text: str + + type: Literal["summary_text"] + + + + +@json_schema_type +class OpenAIResponseReasoningItem(BaseModel): + """ + :param id: The unique identifier of the reasoning content. + :param summary: Reasoning summary content. + :param type: The type of the object. Always `reasoning`. + :param content: Reasoning text content. + :param encrypted_content: The encrypted content of the reasoning item - populated when a response is + generated with `reasoning.encrypted_content` in the `include` parameter. + :param status: The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. +""" + id: str + + summary: List[OpenAISummary] + + type: Literal["reasoning"] + + content: Optional[List[OpenAIContent]] = None + + encrypted_content: Optional[str] = None + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + + +OpenAIResponseInputItem: TypeAlias = Annotated[ + Union[ + OpenAIEasyInputMessage, + OpenAIMessage, + OpenAIResponseOutputMessage, + OpenAIResponseFileSearchToolCall, + OpenAIResponseComputerToolCall, + OpenAIComputerCallOutput, + OpenAIResponseFunctionWebSearch, + OpenAIResponseFunctionToolCall, + OpenAIFunctionCallOutput, + OpenAIResponseReasoningItem, + OpenAIImageGenerationCall, + OpenAIResponseCodeInterpreterToolCall, + OpenAILocalShellCall, + OpenAILocalShellCallOutput, + OpenAIMcpListTools, + OpenAIMcpApprovalRequest, + OpenAIMcpApprovalResponse, + OpenAIMcpCall, + OpenAIResponseCustomToolCallOutput, + OpenAIResponseCustomToolCall, + OpenAIItemReference, + ], + Field(discriminator="type"), +] + + + +@json_schema_type +class OpenAIResponseFormatJSONObject(BaseModel): + """ + :param type: The type of response format being defined. Always `json_object`. +""" + type: Literal["json_object"] + + + + +@json_schema_type +class OpenAIResponseFormatTextJSONSchemaConfig(BaseModel): + """ + :param name: The name of the response format. 
+ + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + :param schema_: The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + :param type: The type of response format being defined. Always `json_schema`. + :param description: A description of what the response format is for, used by the model to determine + how to respond in the format. + :param strict: Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). +""" + name: str + + schema_: Dict[str, object] = Field(alias="schema") + + type: Literal["json_schema"] + + description: Optional[str] = None + + strict: Optional[bool] = None + + + + +@json_schema_type +class OpenAIResponseFormatText(BaseModel): + """ + :param type: The type of response format being defined. Always `text`. +""" + type: Literal["text"] + + +OpenAIResponseFormatTextConfig: TypeAlias = Annotated[ + Union[OpenAIResponseFormatText, OpenAIResponseFormatTextJSONSchemaConfig, OpenAIResponseFormatJSONObject], + Field(discriminator="type"), +] + + + +@json_schema_type +class OpenAIResponseTextConfig(BaseModel): + """ + :param format: An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + :param verbosity: Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. +""" + format: Optional[OpenAIResponseFormatTextConfig] = None + + verbosity: Optional[Literal["low", "medium", "high"]] = None + + +OpenAIResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"] + + + +@json_schema_type +class OpenAIIncompleteDetails(BaseModel): + """ + :param reason: The reason why the response is incomplete. +""" + reason: Optional[Literal["max_output_tokens", "content_filter"]] = None + + +OpenAIResponsesModel: TypeAlias = Union[ + str, + OpenAIChatModel, + Literal[ + "o1-pro", + "o1-pro-2025-03-19", + "o3-pro", + "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", + "computer-use-preview", + "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", + ], +] + +OpenAIVariables: TypeAlias = Union[str, OpenAIResponseInputText, OpenAIResponseInputImage, OpenAIResponseInputFile] + + + +@json_schema_type +class OpenAIResponsePrompt(BaseModel): + """ + :param id: The unique identifier of the prompt template to use. 
+    :param variables: Optional map of values to substitute in for variables in your prompt.
+
+    The substitution values can either be strings, or other Response input types
+    like images or files.
+    :param version: Optional version of the prompt template.
+"""
+    id: str
+
+    variables: Optional[Dict[str, OpenAIVariables]] = None
+
+    version: Optional[str] = None
+
+
+
+
+@json_schema_type
+class OpenAIToolChoiceTypes(BaseModel):
+    """
+    :param type: The type of hosted tool the model should use.
+
+    Learn more about
+    [built-in tools](https://platform.openai.com/docs/guides/tools).
+
+    Allowed values are:
+
+    - `file_search`
+    - `web_search_preview`
+    - `computer_use_preview`
+    - `code_interpreter`
+    - `image_generation`
+"""
+    type: Literal[
+        "file_search",
+        "web_search_preview",
+        "computer_use_preview",
+        "web_search_preview_2025_03_11",
+        "image_generation",
+        "code_interpreter",
+    ]
+
+
+
+
+@json_schema_type
+class OpenAIToolChoiceAllowed(BaseModel):
+    """
+    :param mode: Constrains the tools available to the model to a pre-defined set.
+
+    `auto` allows the model to pick from among the allowed tools and generate a
+    message.
+
+    `required` requires the model to call one or more of the allowed tools.
+    :param tools: A list of tool definitions that the model should be allowed to call.
+
+    For the Responses API, the list of tool definitions might look like:
+
+    ```json
+    [
+      { "type": "function", "name": "get_weather" },
+      { "type": "mcp", "server_label": "deepwiki" },
+      { "type": "image_generation" }
+    ]
+    ```
+    :param type: Allowed tool configuration type. Always `allowed_tools`.
+"""
+    mode: Literal["auto", "required"]
+
+    tools: List[Dict[str, object]]
+
+    type: Literal["allowed_tools"]
+
+
+
+
+@json_schema_type
+class OpenAIToolChoiceFunction(BaseModel):
+    """
+    :param name: The name of the function to call.
+    :param type: For function calling, the type is always `function`.
+"""
+    name: str
+
+    type: Literal["function"]
+
+
+
+
+@json_schema_type
+class OpenAIToolChoiceCustom(BaseModel):
+    """
+    :param name: The name of the custom tool to call.
+    :param type: For custom tool calling, the type is always `custom`.
+"""
+    name: str
+
+    type: Literal["custom"]
+
+
+
+
+@json_schema_type
+class OpenAIToolChoiceMcp(BaseModel):
+    """
+    :param server_label: The label of the MCP server to use.
+    :param type: For MCP tools, the type is always `mcp`.
+    :param name: The name of the tool to call on the server.
+"""
+    server_label: str
+
+    type: Literal["mcp"]
+
+    name: Optional[str] = None
+
+
+OpenAIToolChoice: TypeAlias = Union[
+    OpenAIToolChoiceOptions, OpenAIToolChoiceAllowed, OpenAIToolChoiceTypes, OpenAIToolChoiceFunction, OpenAIToolChoiceMcp, OpenAIToolChoiceCustom
+]
+
+
+
+@json_schema_type
+class OpenAIReasoning(BaseModel):
+    """
+    :param effort: Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
+    :param generate_summary: **Deprecated:** use `summary` instead.
+
+    A summary of the reasoning performed by the model. This can be useful for
+    debugging and understanding the model's reasoning process. One of `auto`,
+    `concise`, or `detailed`.
+    :param summary: A summary of the reasoning performed by the model.
+ + This can be useful for debugging and understanding the model's reasoning + process. One of `auto`, `concise`, or `detailed`. +""" + effort: Optional[OpenAIReasoningEffort] = None + + generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None + + summary: Optional[Literal["auto", "concise", "detailed"]] = None + + + + +@json_schema_type +class OpenAIOutputTokensDetails(BaseModel): + """ + :param reasoning_tokens: The number of reasoning tokens. +""" + reasoning_tokens: int + + + + +@json_schema_type +class OpenAIInputTokensDetails(BaseModel): + """ + :param cached_tokens: The number of tokens that were retrieved from the cache. + + [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). +""" + cached_tokens: int + + + + +@json_schema_type +class OpenAIResponseUsage(BaseModel): + """ + :param input_tokens: The number of input tokens. + :param input_tokens_details: A detailed breakdown of the input tokens. + :param output_tokens: The number of output tokens. + :param output_tokens_details: A detailed breakdown of the output tokens. + :param total_tokens: The total number of tokens used. +""" + input_tokens: int + + input_tokens_details: OpenAIInputTokensDetails + + output_tokens: int + + output_tokens_details: OpenAIOutputTokensDetails + + total_tokens: int + + + + +@json_schema_type +class OpenAIResponseError(BaseModel): + """ + :param code: The error code for the response. + :param message: A human-readable description of the error. +""" + code: Literal[ + "server_error", + "rate_limit_exceeded", + "invalid_prompt", + "vector_store_timeout", + "invalid_image", + "invalid_image_format", + "invalid_base64_image", + "invalid_image_url", + "image_too_large", + "image_too_small", + "image_parse_error", + "image_content_policy_violation", + "invalid_image_mode", + "image_file_too_large", + "unsupported_image_media_type", + "empty_image_file", + "failed_to_download_image", + "image_file_not_found", + ] + + message: str + + + + +@json_schema_type +class OpenAIImageGenerationCall(BaseModel): + """ + :param id: The unique ID of the image generation call. + :param result: The generated image encoded in base64. + :param status: The status of the image generation call. + :param type: The type of the image generation call. Always `image_generation_call`. +""" + id: str + + result: Optional[str] = None + + status: Literal["in_progress", "completed", "generating", "failed"] + + type: Literal["image_generation_call"] + + + + +@json_schema_type +class OpenAIMcpListToolsTool(BaseModel): + """ + :param input_schema: The JSON schema describing the tool's input. + :param name: The name of the tool. + :param annotations: Additional annotations about the tool. + :param description: The description of the tool. +""" + input_schema: object + + name: str + + annotations: Optional[object] = None + + description: Optional[str] = None + + + + +@json_schema_type +class OpenAIMcpListTools(BaseModel): + """ + :param id: The unique ID of the list. + :param server_label: The label of the MCP server. + :param tools: The tools available on the server. + :param type: The type of the item. Always `mcp_list_tools`. + :param error: Error message if the server could not list tools. +""" + id: str + + server_label: str + + tools: List[OpenAIMcpListToolsTool] + + type: Literal["mcp_list_tools"] + + error: Optional[str] = None + + + + +@json_schema_type +class OpenAIMcpCall(BaseModel): + """ + :param id: The unique ID of the tool call. 
+ :param arguments: A JSON string of the arguments passed to the tool. + :param name: The name of the tool that was run. + :param server_label: The label of the MCP server running the tool. + :param type: The type of the item. Always `mcp_call`. + :param approval_request_id: Unique identifier for the MCP tool call approval request. Include this value in + a subsequent `mcp_approval_response` input to approve or reject the + corresponding tool call. + :param error: The error from the tool call, if any. + :param output: The output from the tool call. + :param status: The status of the tool call. + + One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`. +""" + id: str + + arguments: str + + name: str + + server_label: str + + type: Literal["mcp_call"] + + approval_request_id: Optional[str] = None + + error: Optional[str] = None + + output: Optional[str] = None + + status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None + + + + +@json_schema_type +class OpenAIMcpApprovalRequest(BaseModel): + """ + :param id: The unique ID of the approval request. + :param arguments: A JSON string of arguments for the tool. + :param name: The name of the tool to run. + :param server_label: The label of the MCP server making the request. + :param type: The type of the item. Always `mcp_approval_request`. +""" + id: str + + arguments: str + + name: str + + server_label: str + + type: Literal["mcp_approval_request"] + + + + +@json_schema_type +class OpenAILocalShellCallAction(BaseModel): + """ + :param command: The command to run. + :param env: Environment variables to set for the command. + :param type: The type of the local shell action. Always `exec`. + :param timeout_ms: Optional timeout in milliseconds for the command. + :param user: Optional user to run the command as. + :param working_directory: Optional working directory to run the command in. +""" + command: List[str] + + env: Dict[str, str] + + type: Literal["exec"] + + timeout_ms: Optional[int] = None + + user: Optional[str] = None + + working_directory: Optional[str] = None + + + + +@json_schema_type +class OpenAILocalShellCall(BaseModel): + """ + :param id: The unique ID of the local shell call. + :param action: Execute a shell command on the server. + :param call_id: The unique ID of the local shell tool call generated by the model. + :param status: The status of the local shell call. + :param type: The type of the local shell call. Always `local_shell_call`. +""" + id: str + + action: OpenAILocalShellCallAction + + call_id: str + + status: Literal["in_progress", "completed", "incomplete"] + + type: Literal["local_shell_call"] + + +OpenAIResponseOutputItem: TypeAlias = Annotated[ + Union[ + OpenAIResponseOutputMessage, + OpenAIResponseFileSearchToolCall, + OpenAIResponseFunctionToolCall, + OpenAIResponseFunctionWebSearch, + OpenAIResponseComputerToolCall, + OpenAIResponseReasoningItem, + OpenAIImageGenerationCall, + OpenAIResponseCodeInterpreterToolCall, + OpenAILocalShellCall, + OpenAIMcpCall, + OpenAIMcpListTools, + OpenAIMcpApprovalRequest, + OpenAIResponseCustomToolCall, + ], + Field(discriminator="type"), +] + + + +@json_schema_type +class OpenAICodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): + """ + :param type: Always `auto`. + :param file_ids: An optional list of uploaded files to make available to your code. 
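+
+    Example (an illustrative sketch; the file ID is hypothetical):
+
+    >>> container = OpenAICodeInterpreterContainerCodeInterpreterToolAuto(
+    ...     type="auto",
+    ...     file_ids=["file_abc123"],
+    ... )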
+""" + type: Literal["auto"] + + file_ids: Optional[List[str]] = None + + +OpenAICodeInterpreterContainer: TypeAlias = Union[str, OpenAICodeInterpreterContainerCodeInterpreterToolAuto] + + + +@json_schema_type +class OpenAICodeInterpreter(BaseModel): + """ + :param container: The code interpreter container. + + Can be a container ID or an object that specifies uploaded file IDs to make + available to your code. + :param type: The type of the code interpreter tool. Always `code_interpreter`. +""" + container: OpenAICodeInterpreterContainer + + type: Literal["code_interpreter"] + + + + +@json_schema_type +class OpenAILocalShell(BaseModel): + """ + :param type: The type of the local shell tool. Always `local_shell`. +""" + type: Literal["local_shell"] + + + + +@json_schema_type +class OpenAIGrammar(BaseModel): + """ + :param definition: The grammar definition. + :param syntax: The syntax of the grammar definition. One of `lark` or `regex`. + :param type: Grammar format. Always `grammar`. +""" + definition: str + + syntax: Literal["lark", "regex"] + + type: Literal["grammar"] + + + + +@json_schema_type +class OpenAIText(BaseModel): + """ + :param type: Unconstrained text format. Always `text`. +""" + type: Literal["text"] + + +OpenAICustomToolInputFormat: TypeAlias = Annotated[Union[OpenAIText, OpenAIGrammar], Field(discriminator="type")] + + + +@json_schema_type +class OpenAICustomTool(BaseModel): + """ + :param name: The name of the custom tool, used to identify it in tool calls. + :param type: The type of the custom tool. Always `custom`. + :param description: Optional description of the custom tool, used to provide more context. + :param format: The input format for the custom tool. Default is unconstrained text. +""" + name: str + + type: Literal["custom"] + + description: Optional[str] = None + + format: Optional[OpenAICustomToolInputFormat] = None + + + + +@json_schema_type +class OpenAIUserLocation(BaseModel): + """ + :param type: The type of location approximation. Always `approximate`. + :param city: Free text input for the city of the user, e.g. `San Francisco`. + :param country: The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + :param region: Free text input for the region of the user, e.g. `California`. + :param timezone: The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. +""" + type: Literal["approximate"] + + city: Optional[str] = None + + country: Optional[str] = None + + region: Optional[str] = None + + timezone: Optional[str] = None + + + + +@json_schema_type +class OpenAIWebSearchPreviewTool(BaseModel): + """ + :param type: The type of the web search tool. + + One of `web_search_preview` or `web_search_preview_2025_03_11`. + :param search_context_size: High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. + :param user_location: The user's location. +""" + type: Literal["web_search_preview", "web_search_preview_2025_03_11"] + + search_context_size: Optional[Literal["low", "medium", "high"]] = None + + user_location: Optional[OpenAIUserLocation] = None + + + + +@json_schema_type +class OpenAIImageGenerationInputImageMask(BaseModel): + """ + :param file_id: File ID for the mask image. + :param image_url: Base64-encoded mask image. 
+""" + file_id: Optional[str] = None + + image_url: Optional[str] = None + + + + +@json_schema_type +class OpenAIImageGeneration(BaseModel): + """ + :param type: The type of the image generation tool. Always `image_generation`. + :param background: Background type for the generated image. + + One of `transparent`, `opaque`, or `auto`. Default: `auto`. + :param input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and + `low`. Defaults to `low`. + :param input_image_mask: Optional mask for inpainting. + + Contains `image_url` (string, optional) and `file_id` (string, optional). + :param model: The image generation model to use. Default: `gpt-image-1`. + :param moderation: Moderation level for the generated image. Default: `auto`. + :param output_compression: Compression level for the output image. Default: 100. + :param output_format: The output format of the generated image. + + One of `png`, `webp`, or `jpeg`. Default: `png`. + :param partial_images: Number of partial images to generate in streaming mode, from 0 (default value) + to 3. + :param quality: The quality of the generated image. + + One of `low`, `medium`, `high`, or `auto`. Default: `auto`. + :param size: The size of the generated image. + + One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`. +""" + type: Literal["image_generation"] + + background: Optional[Literal["transparent", "opaque", "auto"]] = None + + input_fidelity: Optional[Literal["high", "low"]] = None + + input_image_mask: Optional[OpenAIImageGenerationInputImageMask] = None + + model: Optional[Literal["gpt-image-1", "gpt-image-1-mini"]] = None + + moderation: Optional[Literal["auto", "low"]] = None + + output_compression: Optional[int] = None + + output_format: Optional[Literal["png", "webp", "jpeg"]] = None + + partial_images: Optional[int] = None + + quality: Optional[Literal["low", "medium", "high", "auto"]] = None + + size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None + + + + +@json_schema_type +class OpenAIUserLocation(BaseModel): + """ + :param city: Free text input for the city of the user, e.g. `San Francisco`. + :param country: The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + :param region: Free text input for the region of the user, e.g. `California`. + :param timezone: The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + :param type: The type of location approximation. Always `approximate`. +""" + city: Optional[str] = None + + country: Optional[str] = None + + region: Optional[str] = None + + timezone: Optional[str] = None + + type: Optional[Literal["approximate"]] = None + + + + +@json_schema_type +class OpenAIFilters(BaseModel): + """ + :param allowed_domains: Allowed domains for the search. + + If not provided, all domains are allowed. Subdomains of the provided domains are + allowed as well. + + Example: `["pubmed.ncbi.nlm.nih.gov"]` +""" + allowed_domains: Optional[List[str]] = None + + + + +@json_schema_type +class OpenAIWebSearchTool(BaseModel): + """ + :param type: The type of the web search tool. + + One of `web_search` or `web_search_2025_08_26`. + :param filters: Filters for the search. 
+ :param search_context_size: High level guidance for the amount of context window space to use for the
+ search.
+
+ One of `low`, `medium`, or `high`. `medium` is the default.
+ :param user_location: The approximate location of the user.
+"""
+ type: Literal["web_search", "web_search_2025_08_26"]
+
+ filters: Optional[OpenAIFilters] = None
+
+ search_context_size: Optional[Literal["low", "medium", "high"]] = None
+
+ user_location: Optional[OpenAIUserLocation] = None
+
+
+
+
+@json_schema_type
+class OpenAIMcpAllowedToolsMcpToolFilter(BaseModel):
+ """
+ :param read_only: Indicates whether a tool modifies data or is read-only.
+
+ If an MCP tool is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ :param tool_names: List of allowed tool names.
+"""
+ read_only: Optional[bool] = None
+
+ tool_names: Optional[List[str]] = None
+
+
+OpenAIMcpAllowedTools: TypeAlias = Union[List[str], OpenAIMcpAllowedToolsMcpToolFilter, None]
+
+
+
+@json_schema_type
+class OpenAIMcpRequireApprovalMcpToolApprovalFilterNever(BaseModel):
+ """
+ :param read_only: Indicates whether a tool modifies data or is read-only.
+
+ If an MCP tool is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ :param tool_names: List of tool names that never require approval.
+"""
+ read_only: Optional[bool] = None
+
+ tool_names: Optional[List[str]] = None
+
+
+
+
+@json_schema_type
+class OpenAIMcpRequireApprovalMcpToolApprovalFilterAlways(BaseModel):
+ """
+ :param read_only: Indicates whether a tool modifies data or is read-only.
+
+ If an MCP tool is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ :param tool_names: List of tool names that always require approval.
+"""
+ read_only: Optional[bool] = None
+
+ tool_names: Optional[List[str]] = None
+
+
+
+
+@json_schema_type
+class OpenAIMcpRequireApprovalMcpToolApprovalFilter(BaseModel):
+ """
+ :param always: A filter object specifying the tools that always require approval.
+ :param never: A filter object specifying the tools that never require approval.
+"""
+ always: Optional[OpenAIMcpRequireApprovalMcpToolApprovalFilterAlways] = None
+
+ never: Optional[OpenAIMcpRequireApprovalMcpToolApprovalFilterNever] = None
+
+
+OpenAIMcpRequireApproval: TypeAlias = Union[OpenAIMcpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None]
+
+
+
+@json_schema_type
+class OpenAIMcp(BaseModel):
+ """
+ :param server_label: A label for this MCP server, used to identify it in tool calls.
+ :param type: The type of the MCP tool. Always `mcp`.
+ :param allowed_tools: List of allowed tool names or a filter object.
+ :param authorization: An OAuth access token that can be used with a remote MCP server, either with a
+ custom MCP server URL or a service connector. Your application must handle the
+ OAuth authorization flow and provide the token here.
+ :param connector_id: Identifier for service connectors, like those available in ChatGPT.
+
+ One of `server_url` or `connector_id` must be provided. Learn more about service
+ connectors
+ [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+ + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + :param headers: Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + :param require_approval: Specify which of the MCP server's tools require approval. + :param server_description: Optional description of the MCP server, used to provide more context. + :param server_url: The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. +""" + server_label: str + + type: Literal["mcp"] + + allowed_tools: Optional[OpenAIMcpAllowedTools] = None + + authorization: Optional[str] = None + + connector_id: Optional[ + Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + ] = None + + headers: Optional[Dict[str, str]] = None + + require_approval: Optional[OpenAIMcpRequireApproval] = None + + server_description: Optional[str] = None + + server_url: Optional[str] = None + + + + +@json_schema_type +class OpenAIComputerTool(BaseModel): + """ + :param display_height: The height of the computer display. + :param display_width: The width of the computer display. + :param environment: The type of computer environment to control. + :param type: The type of the computer use tool. Always `computer_use_preview`. +""" + display_height: int + + display_width: int + + environment: Literal["windows", "mac", "linux", "ubuntu", "browser"] + + type: Literal["computer_use_preview"] + + + + +@json_schema_type +class OpenAIFunctionTool(BaseModel): + """ + :param name: The name of the function to call. + :param parameters: A JSON schema object describing the parameters of the function. + :param strict: Whether to enforce strict parameter validation. Default `true`. + :param type: The type of the function tool. Always `function`. + :param description: A description of the function. + + Used by the model to determine whether or not to call the function. +""" + name: str + + parameters: Optional[Dict[str, object]] = None + + strict: Optional[bool] = None + + type: Literal["function"] + + description: Optional[str] = None + + + + +@json_schema_type +class OpenAIRankingOptions(BaseModel): + """ + :param ranker: The ranker to use for the file search. + :param score_threshold: The score threshold for the file search, a number between 0 and 1. + + Numbers closer to 1 will attempt to return only the most relevant results, but + may return fewer results. +""" + ranker: Optional[Literal["auto", "default-2024-11-15"]] = None + + score_threshold: Optional[float] = None + + + + +@json_schema_type +class OpenAIComparisonFilter(BaseModel): + """ + :param key: The key to compare against the value. + :param type: Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, + `nin`. + + - `eq`: equals + - `ne`: not equal + - `gt`: greater than + - `gte`: greater than or equal + - `lt`: less than + - `lte`: less than or equal + - `in`: in + - `nin`: not in + :param value: The value to compare against the attribute key; supports string, number, or + boolean types. 
+""" + key: str + + type: Literal["eq", "ne", "gt", "gte", "lt", "lte"] + + value: Union[str, float, bool, List[Union[str, float]]] + + +OpenAIFilter: TypeAlias = Union[OpenAIComparisonFilter, object] + + + +@json_schema_type +class OpenAICompoundFilter(BaseModel): + """ + :param filters: Array of filters to combine. + + Items can be `ComparisonFilter` or `CompoundFilter`. + :param type: Type of operation: `and` or `or`. +""" + filters: List[OpenAIFilter] + + type: Literal["and", "or"] + + +OpenAIFilters: TypeAlias = Union[OpenAIComparisonFilter, OpenAICompoundFilter, None] + + + +@json_schema_type +class OpenAIFileSearchTool(BaseModel): + """ + :param type: The type of the file search tool. Always `file_search`. + :param vector_store_ids: The IDs of the vector stores to search. + :param filters: A filter to apply. + :param max_num_results: The maximum number of results to return. + + This number should be between 1 and 50 inclusive. + :param ranking_options: Ranking options for search. +""" + type: Literal["file_search"] + + vector_store_ids: List[str] + + filters: Optional[OpenAIFilters] = None + + max_num_results: Optional[int] = None + + ranking_options: Optional[OpenAIRankingOptions] = None + + +OpenAITool: TypeAlias = Annotated[ + Union[ + OpenAIFunctionTool, + OpenAIFileSearchTool, + OpenAIComputerTool, + OpenAIWebSearchTool, + OpenAIMcp, + OpenAICodeInterpreter, + OpenAIImageGeneration, + OpenAILocalShell, + OpenAICustomTool, + OpenAIWebSearchPreviewTool, + ], + Field(discriminator="type"), +] + + + +@json_schema_type +class OpenAIResponse(BaseModel): + """ + :param id: Unique identifier for this Response. + :param created_at: Unix timestamp (in seconds) of when this Response was created. + :param error: An error object returned when the model fails to generate a Response. + :param incomplete_details: Details about why the response is incomplete. + :param instructions: A system (or developer) message inserted into the model's context. + + When using along with `previous_response_id`, the instructions from a previous + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. + :param metadata: Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + :param model: Model ID used to generate the response, like `gpt-4o` or `o3`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + :param object: The object type of this resource - always set to `response`. + :param output: An array of content items generated by the model. + + - The length and order of items in the `output` array is dependent on the + model's response. + - Rather than accessing the first item in the `output` array and assuming it's + an `assistant` message with the content generated by the model, you might + consider using the `output_text` property where supported in SDKs. + :param parallel_tool_calls: Whether to allow the model to run tool calls in parallel. + :param temperature: What sampling temperature to use, between 0 and 2. 
+ + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + :param tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + :param tools: An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + We support the following categories of tools: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code with strongly typed arguments and outputs. + Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. + :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + :param background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + :param conversation: The conversation that this response belongs to. + + Input items and output items from this response are automatically added to this + conversation. + :param max_output_tokens: An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + :param max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a + response. This maximum number applies across all built-in tool calls, not per + individual tool. Any further attempts to call a tool by the model will be + ignored. + :param previous_response_id: The unique ID of the previous response to the model. + + Use this to create multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + :param prompt: Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + :param prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + :param reasoning: **gpt-5 and o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
+ :param safety_identifier: A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The ID should be a string that uniquely
+ identifies each user. We recommend hashing their username or email address to
+ avoid sending us any identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ :param service_tier: Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
+ - When not set, the default behavior is 'auto'.
+
+ When the `service_tier` parameter is set, the response body will include the
+ `service_tier` value based on the processing mode actually used to serve the
+ request. This response value may be different from the value set in the
+ parameter.
+ :param status: The status of the response generation.
+
+ One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or
+ `incomplete`.
+ :param text: Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ :param top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ :param truncation: The truncation strategy to use for the model response.
+
+ - `auto`: If the input to this Response exceeds the model's context window size,
+ the model will truncate the response to fit the context window by dropping
+ items from the beginning of the conversation.
+ - `disabled` (default): If the input size exceeds the context window size for a
+ model, the request will fail with a 400 error.
+ :param usage: Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used.
+ :param user: This field is being replaced by `safety_identifier` and `prompt_cache_key`.
+
+ Use `prompt_cache_key` instead to maintain caching optimizations. A stable
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
+ similar requests and to help OpenAI detect and prevent abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+""" + id: str + + created_at: float + + error: Optional[OpenAIResponseError] = None + + incomplete_details: Optional[OpenAIIncompleteDetails] = None + + instructions: Union[str, List[OpenAIResponseInputItem], None] = None + + metadata: Optional[OpenAIMetadata] = None + + model: OpenAIResponsesModel + + object: Literal["response"] + + output: List[OpenAIResponseOutputItem] + + parallel_tool_calls: bool + + temperature: Optional[float] = None + + tool_choice: OpenAIToolChoice + + tools: List[OpenAITool] + + top_p: Optional[float] = None + + background: Optional[bool] = None + + conversation: Optional[OpenAIConversation] = None + + max_output_tokens: Optional[int] = None + + max_tool_calls: Optional[int] = None + + previous_response_id: Optional[str] = None + + prompt: Optional[OpenAIResponsePrompt] = None + + prompt_cache_key: Optional[str] = None + + reasoning: Optional[OpenAIReasoning] = None + + safety_identifier: Optional[str] = None + + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None + + status: Optional[OpenAIResponseStatus] = None + + text: Optional[OpenAIResponseTextConfig] = None + + top_logprobs: Optional[int] = None + + truncation: Optional[Literal["auto", "disabled"]] = None + + usage: Optional[OpenAIResponseUsage] = None + + user: Optional[str] = None + + @property + def output_text(self) -> str: + """Convenience property that aggregates all `output_text` items from the `output` list. + + If no `output_text` content blocks exist, then an empty string is returned. + """ + texts: List[str] = [] + for output in self.output: + if output.type == "message": + for content in output.content: + if content.type == "output_text": + texts.append(content.text) + + return "".join(texts) + + + +register_schema(OpenAIMetadata, name="OpenAIMetadata") +register_schema(OpenAICodeInterpreterContainer, name="OpenAICodeInterpreterContainer") +register_schema(OpenAIFilter, name="OpenAIFilter") +register_schema(OpenAIFilters, name="OpenAIFilters") +register_schema(OpenAIMcpRequireApproval, name="OpenAIMcpRequireApproval") +register_schema(OpenAIMcpAllowedTools, name="OpenAIMcpAllowedTools") +register_schema(OpenAICustomToolInputFormat, name="OpenAICustomToolInputFormat") +register_schema(OpenAIToolParam, name="OpenAIToolParam") +register_schema(OpenAIConversation, name="OpenAIConversation") +register_schema(OpenAIResponseIncludable, name="OpenAIResponseIncludable") +register_schema(OpenAIResponseFormatTextConfigParam, name="OpenAIResponseFormatTextConfigParam") +register_schema(OpenAIChatModel, name="OpenAIChatModel") +register_schema(OpenAIResponsesModel, name="OpenAIResponsesModel") +register_schema(OpenAIVariables, name="OpenAIVariables") +register_schema(OpenAIToolChoiceOptions, name="OpenAIToolChoiceOptions") +register_schema(OpenAIToolChoice, name="OpenAIToolChoice") +register_schema(OpenAIReasoningEffort, name="OpenAIReasoningEffort") +register_schema(OpenAIAction, name="OpenAIAction") +register_schema(OpenAIOutputOutputContentList, name="OpenAIOutputOutputContentList") +register_schema(OpenAIResponseFunctionCallOutputItemParam, name="OpenAIResponseFunctionCallOutputItemParam") +register_schema(OpenAIResponseFunctionCallOutputItemListParam, name="OpenAIResponseFunctionCallOutputItemListParam") +register_schema(OpenAIAnnotation, name="OpenAIAnnotation") +register_schema(OpenAIContent, name="OpenAIContent") +register_schema(OpenAIResponseInputContentParam, name="OpenAIResponseInputContentParam") +register_schema(OpenAIResponseInputMessageContentListParam, 
name="OpenAIResponseInputMessageContentListParam") +register_schema(OpenAIOutput, name="OpenAIOutput") +register_schema(OpenAIResponseInputItemParam, name="OpenAIResponseInputItemParam") +register_schema(OpenAIResponseInputParam, name="OpenAIResponseInputParam") +register_schema(OpenAIResponseCreateParamsNonStreaming, name="OpenAIResponseCreateParamsNonStreaming") +register_schema(OpenAIResponseCreateParamsStreaming, name="OpenAIResponseCreateParamsStreaming") +register_schema(OpenAIResponseCreateParams, name="OpenAIResponseCreateParams") +register_schema(OpenAIResult, name="OpenAIResult") +register_schema(OpenAIResponseFileSearchToolCall, name="OpenAIResponseFileSearchToolCall") +register_schema(OpenAIItemReference, name="OpenAIItemReference") +register_schema(OpenAIActionClick, name="OpenAIActionClick") +register_schema(OpenAIActionKeypress, name="OpenAIActionKeypress") +register_schema(OpenAIActionDragPath, name="OpenAIActionDragPath") +register_schema(OpenAIActionDrag, name="OpenAIActionDrag") +register_schema(OpenAIActionMove, name="OpenAIActionMove") +register_schema(OpenAIActionWait, name="OpenAIActionWait") +register_schema(OpenAIActionScroll, name="OpenAIActionScroll") +register_schema(OpenAIActionScreenshot, name="OpenAIActionScreenshot") +register_schema(OpenAIActionType, name="OpenAIActionType") +register_schema(OpenAIActionDoubleClick, name="OpenAIActionDoubleClick") +register_schema(OpenAIPendingSafetyCheck, name="OpenAIPendingSafetyCheck") +register_schema(OpenAIResponseComputerToolCall, name="OpenAIResponseComputerToolCall") +register_schema(OpenAIResponseInputFile, name="OpenAIResponseInputFile") +register_schema(OpenAIResponseInputImage, name="OpenAIResponseInputImage") +register_schema(OpenAIResponseInputText, name="OpenAIResponseInputText") +register_schema(OpenAIResponseCustomToolCallOutput, name="OpenAIResponseCustomToolCallOutput") +register_schema(OpenAIImageGenerationCall, name="OpenAIImageGenerationCall") +register_schema(OpenAIActionOpenPage, name="OpenAIActionOpenPage") +register_schema(OpenAIActionSearchSource, name="OpenAIActionSearchSource") +register_schema(OpenAIActionSearch, name="OpenAIActionSearch") +register_schema(OpenAIActionFind, name="OpenAIActionFind") +register_schema(OpenAIResponseFunctionWebSearch, name="OpenAIResponseFunctionWebSearch") +register_schema(OpenAIResponseInputFileContent, name="OpenAIResponseInputFileContent") +register_schema(OpenAIResponseInputTextContent, name="OpenAIResponseInputTextContent") +register_schema(OpenAIResponseInputImageContent, name="OpenAIResponseInputImageContent") +register_schema(OpenAIResponseFunctionCallOutputItem, name="OpenAIResponseFunctionCallOutputItem") +register_schema(OpenAIResponseFunctionCallOutputItemList, name="OpenAIResponseFunctionCallOutputItemList") +register_schema(OpenAIFunctionCallOutput, name="OpenAIFunctionCallOutput") +register_schema(OpenAIMcpListToolsTool, name="OpenAIMcpListToolsTool") +register_schema(OpenAIMcpListTools, name="OpenAIMcpListTools") +register_schema(OpenAIComputerCallOutputAcknowledgedSafetyCheck, name="OpenAIComputerCallOutputAcknowledgedSafetyCheck") +register_schema(OpenAIResponseComputerToolCallOutputScreenshot, name="OpenAIResponseComputerToolCallOutputScreenshot") +register_schema(OpenAIComputerCallOutput, name="OpenAIComputerCallOutput") +register_schema(OpenAILocalShellCallOutput, name="OpenAILocalShellCallOutput") +register_schema(OpenAIMcpCall, name="OpenAIMcpCall") +register_schema(OpenAILogprobTopLogprob, name="OpenAILogprobTopLogprob") 
+register_schema(OpenAILogprob, name="OpenAILogprob") +register_schema(OpenAIAnnotationFileCitation, name="OpenAIAnnotationFileCitation") +register_schema(OpenAIAnnotationURLCitation, name="OpenAIAnnotationURLCitation") +register_schema(OpenAIAnnotationFilePath, name="OpenAIAnnotationFilePath") +register_schema(OpenAIAnnotationContainerFileCitation, name="OpenAIAnnotationContainerFileCitation") +register_schema(OpenAIResponseOutputText, name="OpenAIResponseOutputText") +register_schema(OpenAIResponseOutputRefusal, name="OpenAIResponseOutputRefusal") +register_schema(OpenAIResponseOutputMessage, name="OpenAIResponseOutputMessage") +register_schema(OpenAIInputAudio, name="OpenAIInputAudio") +register_schema(OpenAIResponseInputAudio, name="OpenAIResponseInputAudio") +register_schema(OpenAIResponseInputContent, name="OpenAIResponseInputContent") +register_schema(OpenAIResponseInputMessageContentList, name="OpenAIResponseInputMessageContentList") +register_schema(OpenAIEasyInputMessage, name="OpenAIEasyInputMessage") +register_schema(OpenAIMcpApprovalResponse, name="OpenAIMcpApprovalResponse") +register_schema(OpenAIMcpApprovalRequest, name="OpenAIMcpApprovalRequest") +register_schema(OpenAILocalShellCallAction, name="OpenAILocalShellCallAction") +register_schema(OpenAILocalShellCall, name="OpenAILocalShellCall") +register_schema(OpenAIMessage, name="OpenAIMessage") +register_schema(OpenAIResponseCustomToolCall, name="OpenAIResponseCustomToolCall") +register_schema(OpenAIResponseFunctionToolCall, name="OpenAIResponseFunctionToolCall") +register_schema(OpenAIOutputLogs, name="OpenAIOutputLogs") +register_schema(OpenAIOutputImage, name="OpenAIOutputImage") +register_schema(OpenAIResponseCodeInterpreterToolCall, name="OpenAIResponseCodeInterpreterToolCall") +register_schema(OpenAISummary, name="OpenAISummary") +register_schema(OpenAIResponseReasoningItem, name="OpenAIResponseReasoningItem") +register_schema(OpenAIResponseInputItem, name="OpenAIResponseInputItem") +register_schema(OpenAIResponseFormatJSONObject, name="OpenAIResponseFormatJSONObject") +register_schema(OpenAIResponseFormatTextJSONSchemaConfig, name="OpenAIResponseFormatTextJSONSchemaConfig") +register_schema(OpenAIResponseFormatText, name="OpenAIResponseFormatText") +register_schema(OpenAIResponseFormatTextConfig, name="OpenAIResponseFormatTextConfig") +register_schema(OpenAIResponseTextConfig, name="OpenAIResponseTextConfig") +register_schema(OpenAIResponseStatus, name="OpenAIResponseStatus") +register_schema(OpenAIIncompleteDetails, name="OpenAIIncompleteDetails") +register_schema(OpenAIResponsePrompt, name="OpenAIResponsePrompt") +register_schema(OpenAIToolChoiceTypes, name="OpenAIToolChoiceTypes") +register_schema(OpenAIToolChoiceAllowed, name="OpenAIToolChoiceAllowed") +register_schema(OpenAIToolChoiceFunction, name="OpenAIToolChoiceFunction") +register_schema(OpenAIToolChoiceCustom, name="OpenAIToolChoiceCustom") +register_schema(OpenAIToolChoiceMcp, name="OpenAIToolChoiceMcp") +register_schema(OpenAIReasoning, name="OpenAIReasoning") +register_schema(OpenAIOutputTokensDetails, name="OpenAIOutputTokensDetails") +register_schema(OpenAIInputTokensDetails, name="OpenAIInputTokensDetails") +register_schema(OpenAIResponseUsage, name="OpenAIResponseUsage") +register_schema(OpenAIResponseError, name="OpenAIResponseError") +register_schema(OpenAIResponseOutputItem, name="OpenAIResponseOutputItem") +register_schema(OpenAICodeInterpreterContainerCodeInterpreterToolAuto, name="OpenAICodeInterpreterContainerCodeInterpreterToolAuto") 
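+
+# Example (an illustrative sketch, not part of the generated schema or the OpenAI API
+# surface): an `mcp` tool entry whose read-only tools never require approval. The
+# server label, URL, and tool names are hypothetical placeholders.
+def _example_mcp_tool() -> OpenAIMcp:
+    return OpenAIMcp(
+        type="mcp",
+        server_label="docs",
+        server_url="https://mcp.example.com/sse",
+        allowed_tools=["search_docs", "fetch_page"],
+        require_approval=OpenAIMcpRequireApprovalMcpToolApprovalFilter(
+            never=OpenAIMcpRequireApprovalMcpToolApprovalFilterNever(read_only=True),
+        ),
+    )
+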
+register_schema(OpenAICodeInterpreter, name="OpenAICodeInterpreter") +register_schema(OpenAILocalShell, name="OpenAILocalShell") +register_schema(OpenAIGrammar, name="OpenAIGrammar") +register_schema(OpenAIText, name="OpenAIText") +register_schema(OpenAICustomTool, name="OpenAICustomTool") +register_schema(OpenAIUserLocation, name="OpenAIUserLocation") +register_schema(OpenAIWebSearchPreviewTool, name="OpenAIWebSearchPreviewTool") +register_schema(OpenAIImageGenerationInputImageMask, name="OpenAIImageGenerationInputImageMask") +register_schema(OpenAIImageGeneration, name="OpenAIImageGeneration") +register_schema(OpenAIWebSearchTool, name="OpenAIWebSearchTool") +register_schema(OpenAIMcpAllowedToolsMcpToolFilter, name="OpenAIMcpAllowedToolsMcpToolFilter") +register_schema(OpenAIMcpRequireApprovalMcpToolApprovalFilterNever, name="OpenAIMcpRequireApprovalMcpToolApprovalFilterNever") +register_schema(OpenAIMcpRequireApprovalMcpToolApprovalFilterAlways, name="OpenAIMcpRequireApprovalMcpToolApprovalFilterAlways") +register_schema(OpenAIMcpRequireApprovalMcpToolApprovalFilter, name="OpenAIMcpRequireApprovalMcpToolApprovalFilter") +register_schema(OpenAIMcp, name="OpenAIMcp") +register_schema(OpenAIComputerTool, name="OpenAIComputerTool") +register_schema(OpenAIFunctionTool, name="OpenAIFunctionTool") +register_schema(OpenAIRankingOptions, name="OpenAIRankingOptions") +register_schema(OpenAIComparisonFilter, name="OpenAIComparisonFilter") +register_schema(OpenAICompoundFilter, name="OpenAICompoundFilter") +register_schema(OpenAIFileSearchTool, name="OpenAIFileSearchTool") +register_schema(OpenAITool, name="OpenAITool") +register_schema(OpenAIResponse, name="OpenAIResponse") diff --git a/llama_stack/apis/agents/openai_responses.py b/llama_stack/apis/agents/openai_responses.py deleted file mode 100644 index a1ce134b67..0000000000 --- a/llama_stack/apis/agents/openai_responses.py +++ /dev/null @@ -1,1055 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -from typing import Annotated, Any, Literal - -from pydantic import BaseModel, Field -from typing_extensions import TypedDict - -from llama_stack.apis.vector_io import SearchRankingOptions as FileSearchRankingOptions -from llama_stack.schema_utils import json_schema_type, register_schema - -# NOTE(ashwin): this file is literally a copy of the OpenAI responses API schema. We should probably -# take their YAML and generate this file automatically. Their YAML is available. - - -@json_schema_type -class OpenAIResponseError(BaseModel): - """Error details for failed OpenAI response requests. - - :param code: Error code identifying the type of failure - :param message: Human-readable error message describing the failure - """ - - code: str - message: str - - -@json_schema_type -class OpenAIResponseInputMessageContentText(BaseModel): - """Text content for input messages in OpenAI response format. - - :param text: The text content of the input message - :param type: Content type identifier, always "input_text" - """ - - text: str - type: Literal["input_text"] = "input_text" - - -@json_schema_type -class OpenAIResponseInputMessageContentImage(BaseModel): - """Image content for input messages in OpenAI response format. 
- - :param detail: Level of detail for image processing, can be "low", "high", or "auto" - :param type: Content type identifier, always "input_image" - :param image_url: (Optional) URL of the image content - """ - - detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto" - type: Literal["input_image"] = "input_image" - # TODO: handle file_id - image_url: str | None = None - - -# TODO: handle file content types -OpenAIResponseInputMessageContent = Annotated[ - OpenAIResponseInputMessageContentText | OpenAIResponseInputMessageContentImage, - Field(discriminator="type"), -] -register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent") - - -@json_schema_type -class OpenAIResponseAnnotationFileCitation(BaseModel): - """File citation annotation for referencing specific files in response content. - - :param type: Annotation type identifier, always "file_citation" - :param file_id: Unique identifier of the referenced file - :param filename: Name of the referenced file - :param index: Position index of the citation within the content - """ - - type: Literal["file_citation"] = "file_citation" - file_id: str - filename: str - index: int - - -@json_schema_type -class OpenAIResponseAnnotationCitation(BaseModel): - """URL citation annotation for referencing external web resources. - - :param type: Annotation type identifier, always "url_citation" - :param end_index: End position of the citation span in the content - :param start_index: Start position of the citation span in the content - :param title: Title of the referenced web resource - :param url: URL of the referenced web resource - """ - - type: Literal["url_citation"] = "url_citation" - end_index: int - start_index: int - title: str - url: str - - -@json_schema_type -class OpenAIResponseAnnotationContainerFileCitation(BaseModel): - type: Literal["container_file_citation"] = "container_file_citation" - container_id: str - end_index: int - file_id: str - filename: str - start_index: int - - -@json_schema_type -class OpenAIResponseAnnotationFilePath(BaseModel): - type: Literal["file_path"] = "file_path" - file_id: str - index: int - - -OpenAIResponseAnnotations = Annotated[ - OpenAIResponseAnnotationFileCitation - | OpenAIResponseAnnotationCitation - | OpenAIResponseAnnotationContainerFileCitation - | OpenAIResponseAnnotationFilePath, - Field(discriminator="type"), -] -register_schema(OpenAIResponseAnnotations, name="OpenAIResponseAnnotations") - - -@json_schema_type -class OpenAIResponseOutputMessageContentOutputText(BaseModel): - text: str - type: Literal["output_text"] = "output_text" - annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list) - - -OpenAIResponseOutputMessageContent = Annotated[ - OpenAIResponseOutputMessageContentOutputText, - Field(discriminator="type"), -] -register_schema(OpenAIResponseOutputMessageContent, name="OpenAIResponseOutputMessageContent") - - -@json_schema_type -class OpenAIResponseMessage(BaseModel): - """ - Corresponds to the various Message types in the Responses API. - They are all under one type because the Responses API gives them all - the same "type" value, and there is no way to tell them apart in certain - scenarios. - """ - - content: str | list[OpenAIResponseInputMessageContent] | list[OpenAIResponseOutputMessageContent] - role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"] - type: Literal["message"] = "message" - - # The fields below are not used in all scenarios, but are required in others. 
- id: str | None = None - status: str | None = None - - -@json_schema_type -class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel): - """Web search tool call output message for OpenAI responses. - - :param id: Unique identifier for this tool call - :param status: Current status of the web search operation - :param type: Tool call type identifier, always "web_search_call" - """ - - id: str - status: str - type: Literal["web_search_call"] = "web_search_call" - - -class OpenAIResponseOutputMessageFileSearchToolCallResults(BaseModel): - """Search results returned by the file search operation. - - :param attributes: (Optional) Key-value attributes associated with the file - :param file_id: Unique identifier of the file containing the result - :param filename: Name of the file containing the result - :param score: Relevance score for this search result (between 0 and 1) - :param text: Text content of the search result - """ - - attributes: dict[str, Any] - file_id: str - filename: str - score: float - text: str - - -@json_schema_type -class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel): - """File search tool call output message for OpenAI responses. - - :param id: Unique identifier for this tool call - :param queries: List of search queries executed - :param status: Current status of the file search operation - :param type: Tool call type identifier, always "file_search_call" - :param results: (Optional) Search results returned by the file search operation - """ - - id: str - queries: list[str] - status: str - type: Literal["file_search_call"] = "file_search_call" - results: list[OpenAIResponseOutputMessageFileSearchToolCallResults] | None = None - - -@json_schema_type -class OpenAIResponseOutputMessageFunctionToolCall(BaseModel): - """Function tool call output message for OpenAI responses. - - :param call_id: Unique identifier for the function call - :param name: Name of the function being called - :param arguments: JSON string containing the function arguments - :param type: Tool call type identifier, always "function_call" - :param id: (Optional) Additional identifier for the tool call - :param status: (Optional) Current status of the function call execution - """ - - call_id: str - name: str - arguments: str - type: Literal["function_call"] = "function_call" - id: str | None = None - status: str | None = None - - -@json_schema_type -class OpenAIResponseOutputMessageMCPCall(BaseModel): - """Model Context Protocol (MCP) call output message for OpenAI responses. - - :param id: Unique identifier for this MCP call - :param type: Tool call type identifier, always "mcp_call" - :param arguments: JSON string containing the MCP call arguments - :param name: Name of the MCP method being called - :param server_label: Label identifying the MCP server handling the call - :param error: (Optional) Error message if the MCP call failed - :param output: (Optional) Output result from the successful MCP call - """ - - id: str - type: Literal["mcp_call"] = "mcp_call" - arguments: str - name: str - server_label: str - error: str | None = None - output: str | None = None - - -class MCPListToolsTool(BaseModel): - """Tool definition returned by MCP list tools operation. 
- - :param input_schema: JSON schema defining the tool's input parameters - :param name: Name of the tool - :param description: (Optional) Description of what the tool does - """ - - input_schema: dict[str, Any] - name: str - description: str | None = None - - -@json_schema_type -class OpenAIResponseOutputMessageMCPListTools(BaseModel): - """MCP list tools output message containing available tools from an MCP server. - - :param id: Unique identifier for this MCP list tools operation - :param type: Tool call type identifier, always "mcp_list_tools" - :param server_label: Label identifying the MCP server providing the tools - :param tools: List of available tools provided by the MCP server - """ - - id: str - type: Literal["mcp_list_tools"] = "mcp_list_tools" - server_label: str - tools: list[MCPListToolsTool] - - -@json_schema_type -class OpenAIResponseMCPApprovalRequest(BaseModel): - """ - A request for human approval of a tool invocation. - """ - - arguments: str - id: str - name: str - server_label: str - type: Literal["mcp_approval_request"] = "mcp_approval_request" - - -@json_schema_type -class OpenAIResponseMCPApprovalResponse(BaseModel): - """ - A response to an MCP approval request. - """ - - approval_request_id: str - approve: bool - type: Literal["mcp_approval_response"] = "mcp_approval_response" - id: str | None = None - reason: str | None = None - - -OpenAIResponseOutput = Annotated[ - OpenAIResponseMessage - | OpenAIResponseOutputMessageWebSearchToolCall - | OpenAIResponseOutputMessageFileSearchToolCall - | OpenAIResponseOutputMessageFunctionToolCall - | OpenAIResponseOutputMessageMCPCall - | OpenAIResponseOutputMessageMCPListTools - | OpenAIResponseMCPApprovalRequest, - Field(discriminator="type"), -] -register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput") - - -# This has to be a TypedDict because we need a "schema" field and our strong -# typing code in the schema generator doesn't support Pydantic aliases. That also -# means we can't use a discriminator field here, because TypedDicts don't support -# default values which the strong typing code requires for discriminators. -class OpenAIResponseTextFormat(TypedDict, total=False): - """Configuration for Responses API text format. - - :param type: Must be "text", "json_schema", or "json_object" to identify the format type - :param name: The name of the response format. Only used for json_schema. - :param schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema. - :param description: (Optional) A description of the response format. Only used for json_schema. - :param strict: (Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema. - """ - - type: Literal["text"] | Literal["json_schema"] | Literal["json_object"] - name: str | None - schema: dict[str, Any] | None - description: str | None - strict: bool | None - - -@json_schema_type -class OpenAIResponseText(BaseModel): - """Text response configuration for OpenAI responses. 
- - :param format: (Optional) Text format configuration specifying output format requirements - """ - - format: OpenAIResponseTextFormat | None = None - - -# Must match type Literals of OpenAIResponseInputToolWebSearch below -WebSearchToolTypes = ["web_search", "web_search_preview", "web_search_preview_2025_03_11"] - - -@json_schema_type -class OpenAIResponseInputToolWebSearch(BaseModel): - """Web search tool configuration for OpenAI response inputs. - - :param type: Web search tool type variant to use - :param search_context_size: (Optional) Size of search context, must be "low", "medium", or "high" - """ - - # Must match values of WebSearchToolTypes above - type: Literal["web_search"] | Literal["web_search_preview"] | Literal["web_search_preview_2025_03_11"] = ( - "web_search" - ) - # TODO: actually use search_context_size somewhere... - search_context_size: str | None = Field(default="medium", pattern="^low|medium|high$") - # TODO: add user_location - - -@json_schema_type -class OpenAIResponseInputToolFunction(BaseModel): - """Function tool configuration for OpenAI response inputs. - - :param type: Tool type identifier, always "function" - :param name: Name of the function that can be called - :param description: (Optional) Description of what the function does - :param parameters: (Optional) JSON schema defining the function's parameters - :param strict: (Optional) Whether to enforce strict parameter validation - """ - - type: Literal["function"] = "function" - name: str - description: str | None = None - parameters: dict[str, Any] | None - strict: bool | None = None - - -@json_schema_type -class OpenAIResponseInputToolFileSearch(BaseModel): - """File search tool configuration for OpenAI response inputs. - - :param type: Tool type identifier, always "file_search" - :param vector_store_ids: List of vector store identifiers to search within - :param filters: (Optional) Additional filters to apply to the search - :param max_num_results: (Optional) Maximum number of search results to return (1-50) - :param ranking_options: (Optional) Options for ranking and scoring search results - """ - - type: Literal["file_search"] = "file_search" - vector_store_ids: list[str] - filters: dict[str, Any] | None = None - max_num_results: int | None = Field(default=10, ge=1, le=50) - ranking_options: FileSearchRankingOptions | None = None - - -class ApprovalFilter(BaseModel): - """Filter configuration for MCP tool approval requirements. - - :param always: (Optional) List of tool names that always require approval - :param never: (Optional) List of tool names that never require approval - """ - - always: list[str] | None = None - never: list[str] | None = None - - -class AllowedToolsFilter(BaseModel): - """Filter configuration for restricting which MCP tools can be used. - - :param tool_names: (Optional) List of specific tool names that are allowed - """ - - tool_names: list[str] | None = None - - -@json_schema_type -class OpenAIResponseInputToolMCP(BaseModel): - """Model Context Protocol (MCP) tool configuration for OpenAI response inputs. 
-
- :param type: Tool type identifier, always "mcp"
- :param server_label: Label to identify this MCP server
- :param server_url: URL endpoint of the MCP server
- :param headers: (Optional) HTTP headers to include when connecting to the server
- :param require_approval: Approval requirement for tool calls ("always", "never", or filter)
- :param allowed_tools: (Optional) Restriction on which tools can be used from this server
- """
-
- type: Literal["mcp"] = "mcp"
- server_label: str
- server_url: str
- headers: dict[str, Any] | None = None
-
- require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
- allowed_tools: list[str] | AllowedToolsFilter | None = None
-
-
-OpenAIResponseInputTool = Annotated[
- OpenAIResponseInputToolWebSearch
- | OpenAIResponseInputToolFileSearch
- | OpenAIResponseInputToolFunction
- | OpenAIResponseInputToolMCP,
- Field(discriminator="type"),
-]
-register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
-
-
-@json_schema_type
-class OpenAIResponseToolMCP(BaseModel):
- """Model Context Protocol (MCP) tool configuration for OpenAI response object.
-
- :param type: Tool type identifier, always "mcp"
- :param server_label: Label to identify this MCP server
- :param allowed_tools: (Optional) Restriction on which tools can be used from this server
- """
-
- type: Literal["mcp"] = "mcp"
- server_label: str
- allowed_tools: list[str] | AllowedToolsFilter | None = None
-
-
-OpenAIResponseTool = Annotated[
- OpenAIResponseInputToolWebSearch
- | OpenAIResponseInputToolFileSearch
- | OpenAIResponseInputToolFunction
- | OpenAIResponseToolMCP, # The only type that differs from that in the inputs is the MCP tool
- Field(discriminator="type"),
-]
-register_schema(OpenAIResponseTool, name="OpenAIResponseTool")
-
-
-class OpenAIResponseUsageOutputTokensDetails(BaseModel):
- """Token details for output tokens in OpenAI response usage.
-
- :param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)
- """
-
- reasoning_tokens: int | None = None
-
-
-class OpenAIResponseUsageInputTokensDetails(BaseModel):
- """Token details for input tokens in OpenAI response usage.
-
- :param cached_tokens: Number of tokens retrieved from cache
- """
-
- cached_tokens: int | None = None
-
-
-@json_schema_type
-class OpenAIResponseUsage(BaseModel):
- """Usage information for OpenAI response.
-
- :param input_tokens: Number of tokens in the input
- :param output_tokens: Number of tokens in the output
- :param total_tokens: Total tokens used (input + output)
- :param input_tokens_details: Detailed breakdown of input token usage
- :param output_tokens_details: Detailed breakdown of output token usage
- """
-
- input_tokens: int
- output_tokens: int
- total_tokens: int
- input_tokens_details: OpenAIResponseUsageInputTokensDetails | None = None
- output_tokens_details: OpenAIResponseUsageOutputTokensDetails | None = None
-
-
-@json_schema_type
-class OpenAIResponseObject(BaseModel):
- """Complete OpenAI response object containing generation results and metadata.
-
- :param created_at: Unix timestamp when the response was created
- :param error: (Optional) Error details if the response generation failed
- :param id: Unique identifier for this response
- :param model: Model identifier used for generation
- :param object: Object type identifier, always "response"
- :param output: List of generated output items (messages, tool calls, etc.)
- :param parallel_tool_calls: Whether tool calls can be executed in parallel - :param previous_response_id: (Optional) ID of the previous response in a conversation - :param status: Current status of the response generation - :param temperature: (Optional) Sampling temperature used for generation - :param text: Text formatting configuration for the response - :param top_p: (Optional) Nucleus sampling parameter used for generation - :param tools: (Optional) An array of tools the model may call while generating a response. - :param truncation: (Optional) Truncation strategy applied to the response - :param usage: (Optional) Token usage information for the response - """ - - created_at: int - error: OpenAIResponseError | None = None - id: str - model: str - object: Literal["response"] = "response" - output: list[OpenAIResponseOutput] - parallel_tool_calls: bool = False - previous_response_id: str | None = None - status: str - temperature: float | None = None - # Default to text format to avoid breaking the loading of old responses - # before the field was added. New responses will have this set always. - text: OpenAIResponseText = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")) - top_p: float | None = None - tools: list[OpenAIResponseTool] | None = None - truncation: str | None = None - usage: OpenAIResponseUsage | None = None - - -@json_schema_type -class OpenAIDeleteResponseObject(BaseModel): - """Response object confirming deletion of an OpenAI response. - - :param id: Unique identifier of the deleted response - :param object: Object type identifier, always "response" - :param deleted: Deletion confirmation flag, always True - """ - - id: str - object: Literal["response"] = "response" - deleted: bool = True - - -@json_schema_type -class OpenAIResponseObjectStreamResponseCreated(BaseModel): - """Streaming event indicating a new response has been created. - - :param response: The response object that was created - :param type: Event type identifier, always "response.created" - """ - - response: OpenAIResponseObject - type: Literal["response.created"] = "response.created" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseInProgress(BaseModel): - """Streaming event indicating the response remains in progress. - - :param response: Current response state while in progress - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.in_progress" - """ - - response: OpenAIResponseObject - sequence_number: int - type: Literal["response.in_progress"] = "response.in_progress" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseCompleted(BaseModel): - """Streaming event indicating a response has been completed. - - :param response: Completed response object - :param type: Event type identifier, always "response.completed" - """ - - response: OpenAIResponseObject - type: Literal["response.completed"] = "response.completed" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseIncomplete(BaseModel): - """Streaming event emitted when a response ends in an incomplete state. 
- - :param response: Response object describing the incomplete state - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.incomplete" - """ - - response: OpenAIResponseObject - sequence_number: int - type: Literal["response.incomplete"] = "response.incomplete" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseFailed(BaseModel): - """Streaming event emitted when a response fails. - - :param response: Response object describing the failure - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.failed" - """ - - response: OpenAIResponseObject - sequence_number: int - type: Literal["response.failed"] = "response.failed" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel): - """Streaming event for when a new output item is added to the response. - - :param response_id: Unique identifier of the response containing this output - :param item: The output item that was added (message, tool call, etc.) - :param output_index: Index position of this item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.output_item.added" - """ - - response_id: str - item: OpenAIResponseOutput - output_index: int - sequence_number: int - type: Literal["response.output_item.added"] = "response.output_item.added" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseOutputItemDone(BaseModel): - """Streaming event for when an output item is completed. - - :param response_id: Unique identifier of the response containing this output - :param item: The completed output item (message, tool call, etc.) - :param output_index: Index position of this item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.output_item.done" - """ - - response_id: str - item: OpenAIResponseOutput - output_index: int - sequence_number: int - type: Literal["response.output_item.done"] = "response.output_item.done" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel): - """Streaming event for incremental text content updates. - - :param content_index: Index position within the text content - :param delta: Incremental text content being added - :param item_id: Unique identifier of the output item being updated - :param output_index: Index position of the item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.output_text.delta" - """ - - content_index: int - delta: str - item_id: str - output_index: int - sequence_number: int - type: Literal["response.output_text.delta"] = "response.output_text.delta" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseOutputTextDone(BaseModel): - """Streaming event for when text output is completed. 
- - :param content_index: Index position within the text content - :param text: Final complete text content of the output item - :param item_id: Unique identifier of the completed output item - :param output_index: Index position of the item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.output_text.done" - """ - - content_index: int - text: str # final text of the output item - item_id: str - output_index: int - sequence_number: int - type: Literal["response.output_text.done"] = "response.output_text.done" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(BaseModel): - """Streaming event for incremental function call argument updates. - - :param delta: Incremental function call arguments being added - :param item_id: Unique identifier of the function call being updated - :param output_index: Index position of the item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.function_call_arguments.delta" - """ - - delta: str - item_id: str - output_index: int - sequence_number: int - type: Literal["response.function_call_arguments.delta"] = "response.function_call_arguments.delta" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone(BaseModel): - """Streaming event for when function call arguments are completed. - - :param arguments: Final complete arguments JSON string for the function call - :param item_id: Unique identifier of the completed function call - :param output_index: Index position of the item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.function_call_arguments.done" - """ - - arguments: str # final arguments of the function call - item_id: str - output_index: int - sequence_number: int - type: Literal["response.function_call_arguments.done"] = "response.function_call_arguments.done" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseWebSearchCallInProgress(BaseModel): - """Streaming event for web search calls in progress. - - :param item_id: Unique identifier of the web search call - :param output_index: Index position of the item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.web_search_call.in_progress" - """ - - item_id: str - output_index: int - sequence_number: int - type: Literal["response.web_search_call.in_progress"] = "response.web_search_call.in_progress" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel): - item_id: str - output_index: int - sequence_number: int - type: Literal["response.web_search_call.searching"] = "response.web_search_call.searching" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseWebSearchCallCompleted(BaseModel): - """Streaming event for completed web search calls. 
- - :param item_id: Unique identifier of the completed web search call - :param output_index: Index position of the item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.web_search_call.completed" - """ - - item_id: str - output_index: int - sequence_number: int - type: Literal["response.web_search_call.completed"] = "response.web_search_call.completed" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpListToolsInProgress(BaseModel): - sequence_number: int - type: Literal["response.mcp_list_tools.in_progress"] = "response.mcp_list_tools.in_progress" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpListToolsFailed(BaseModel): - sequence_number: int - type: Literal["response.mcp_list_tools.failed"] = "response.mcp_list_tools.failed" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpListToolsCompleted(BaseModel): - sequence_number: int - type: Literal["response.mcp_list_tools.completed"] = "response.mcp_list_tools.completed" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta(BaseModel): - delta: str - item_id: str - output_index: int - sequence_number: int - type: Literal["response.mcp_call.arguments.delta"] = "response.mcp_call.arguments.delta" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpCallArgumentsDone(BaseModel): - arguments: str # final arguments of the MCP call - item_id: str - output_index: int - sequence_number: int - type: Literal["response.mcp_call.arguments.done"] = "response.mcp_call.arguments.done" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpCallInProgress(BaseModel): - """Streaming event for MCP calls in progress. - - :param item_id: Unique identifier of the MCP call - :param output_index: Index position of the item in the output list - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.mcp_call.in_progress" - """ - - item_id: str - output_index: int - sequence_number: int - type: Literal["response.mcp_call.in_progress"] = "response.mcp_call.in_progress" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpCallFailed(BaseModel): - """Streaming event for failed MCP calls. - - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.mcp_call.failed" - """ - - sequence_number: int - type: Literal["response.mcp_call.failed"] = "response.mcp_call.failed" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseMcpCallCompleted(BaseModel): - """Streaming event for completed MCP calls. - - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.mcp_call.completed" - """ - - sequence_number: int - type: Literal["response.mcp_call.completed"] = "response.mcp_call.completed" - - -@json_schema_type -class OpenAIResponseContentPartOutputText(BaseModel): - """Text content within a streamed response part. 
- - :param type: Content part type identifier, always "output_text" - :param text: Text emitted for this content part - :param annotations: Structured annotations associated with the text - :param logprobs: (Optional) Token log probability details - """ - - type: Literal["output_text"] = "output_text" - text: str - annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list) - logprobs: list[dict[str, Any]] | None = None - - -@json_schema_type -class OpenAIResponseContentPartRefusal(BaseModel): - """Refusal content within a streamed response part. - - :param type: Content part type identifier, always "refusal" - :param refusal: Refusal text supplied by the model - """ - - type: Literal["refusal"] = "refusal" - refusal: str - - -@json_schema_type -class OpenAIResponseContentPartReasoningText(BaseModel): - """Reasoning text emitted as part of a streamed response. - - :param type: Content part type identifier, always "reasoning_text" - :param text: Reasoning text supplied by the model - """ - - type: Literal["reasoning_text"] = "reasoning_text" - text: str - - -OpenAIResponseContentPart = Annotated[ - OpenAIResponseContentPartOutputText | OpenAIResponseContentPartRefusal | OpenAIResponseContentPartReasoningText, - Field(discriminator="type"), -] -register_schema(OpenAIResponseContentPart, name="OpenAIResponseContentPart") - - -@json_schema_type -class OpenAIResponseObjectStreamResponseContentPartAdded(BaseModel): - """Streaming event for when a new content part is added to a response item. - - :param content_index: Index position of the part within the content array - :param response_id: Unique identifier of the response containing this content - :param item_id: Unique identifier of the output item containing this content part - :param output_index: Index position of the output item in the response - :param part: The content part that was added - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.content_part.added" - """ - - content_index: int - response_id: str - item_id: str - output_index: int - part: OpenAIResponseContentPart - sequence_number: int - type: Literal["response.content_part.added"] = "response.content_part.added" - - -@json_schema_type -class OpenAIResponseObjectStreamResponseContentPartDone(BaseModel): - """Streaming event for when a content part is completed. 
- - :param content_index: Index position of the part within the content array - :param response_id: Unique identifier of the response containing this content - :param item_id: Unique identifier of the output item containing this content part - :param output_index: Index position of the output item in the response - :param part: The completed content part - :param sequence_number: Sequential number for ordering streaming events - :param type: Event type identifier, always "response.content_part.done" - """ - - content_index: int - response_id: str - item_id: str - output_index: int - part: OpenAIResponseContentPart - sequence_number: int - type: Literal["response.content_part.done"] = "response.content_part.done" - - -OpenAIResponseObjectStream = Annotated[ - OpenAIResponseObjectStreamResponseCreated - | OpenAIResponseObjectStreamResponseInProgress - | OpenAIResponseObjectStreamResponseOutputItemAdded - | OpenAIResponseObjectStreamResponseOutputItemDone - | OpenAIResponseObjectStreamResponseOutputTextDelta - | OpenAIResponseObjectStreamResponseOutputTextDone - | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta - | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone - | OpenAIResponseObjectStreamResponseWebSearchCallInProgress - | OpenAIResponseObjectStreamResponseWebSearchCallSearching - | OpenAIResponseObjectStreamResponseWebSearchCallCompleted - | OpenAIResponseObjectStreamResponseMcpListToolsInProgress - | OpenAIResponseObjectStreamResponseMcpListToolsFailed - | OpenAIResponseObjectStreamResponseMcpListToolsCompleted - | OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta - | OpenAIResponseObjectStreamResponseMcpCallArgumentsDone - | OpenAIResponseObjectStreamResponseMcpCallInProgress - | OpenAIResponseObjectStreamResponseMcpCallFailed - | OpenAIResponseObjectStreamResponseMcpCallCompleted - | OpenAIResponseObjectStreamResponseContentPartAdded - | OpenAIResponseObjectStreamResponseContentPartDone - | OpenAIResponseObjectStreamResponseIncomplete - | OpenAIResponseObjectStreamResponseFailed - | OpenAIResponseObjectStreamResponseCompleted, - Field(discriminator="type"), -] -register_schema(OpenAIResponseObjectStream, name="OpenAIResponseObjectStream") - - -@json_schema_type -class OpenAIResponseInputFunctionToolCallOutput(BaseModel): - """ - This represents the output of a function call that gets passed back to the model. - """ - - call_id: str - output: str - type: Literal["function_call_output"] = "function_call_output" - id: str | None = None - status: str | None = None - - -OpenAIResponseInput = Annotated[ - # Responses API allows output messages to be passed in as input - OpenAIResponseOutputMessageWebSearchToolCall - | OpenAIResponseOutputMessageFileSearchToolCall - | OpenAIResponseOutputMessageFunctionToolCall - | OpenAIResponseInputFunctionToolCallOutput - | OpenAIResponseMCPApprovalRequest - | OpenAIResponseMCPApprovalResponse - | - # Fallback to the generic message type as a last resort - OpenAIResponseMessage, - Field(union_mode="left_to_right"), -] -register_schema(OpenAIResponseInput, name="OpenAIResponseInput") - - -class ListOpenAIResponseInputItem(BaseModel): - """List container for OpenAI response input items. - - :param data: List of input items - :param object: Object type identifier, always "list" - """ - - data: list[OpenAIResponseInput] - object: Literal["list"] = "list" - - -@json_schema_type -class OpenAIResponseObjectWithInput(OpenAIResponseObject): - """OpenAI response object extended with input context information. 
- - :param input: List of input items that led to this response - """ - - input: list[OpenAIResponseInput] - - def to_response_object(self) -> OpenAIResponseObject: - """Convert to OpenAIResponseObject by excluding input field.""" - return OpenAIResponseObject(**{k: v for k, v in self.model_dump().items() if k != "input"}) - - -@json_schema_type -class ListOpenAIResponseObject(BaseModel): - """Paginated list of OpenAI response objects with navigation metadata. - - :param data: List of response objects with their input context - :param has_more: Whether there are more results available beyond this page - :param first_id: Identifier of the first item in this page - :param last_id: Identifier of the last item in this page - :param object: Object type identifier, always "list" - """ - - data: list[OpenAIResponseObjectWithInput] - has_more: bool - first_id: str - last_id: str - object: Literal["list"] = "list" diff --git a/llama_stack/apis/agents/openai_responses/__init__.py b/llama_stack/apis/agents/openai_responses/__init__.py new file mode 100644 index 0000000000..7cd4c5e23d --- /dev/null +++ b/llama_stack/apis/agents/openai_responses/__init__.py @@ -0,0 +1,183 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +"""OpenAI Responses schema re-exports.""" + +# This package mirrors the original openai_responses.py module but splits the +# definitions into focused submodules. The generator at +# scripts/generate_openai_responses.py targets this layout so the code can be +# updated automatically in manageable chunks. + +from .errors import OpenAIResponseError + +from .inputs import ( + OpenAIResponseInput, + ListOpenAIResponseInputItem, +) + +from .messages import ( + OpenAIResponseInputMessageContentText, + OpenAIResponseInputMessageContentImage, + OpenAIResponseInputMessageContent, + OpenAIResponseAnnotationFileCitation, + OpenAIResponseAnnotationCitation, + OpenAIResponseAnnotationContainerFileCitation, + OpenAIResponseAnnotationFilePath, + OpenAIResponseAnnotations, + OpenAIResponseOutputMessageContentOutputText, + OpenAIResponseOutputMessageContent, + OpenAIResponseMessage, +) + +from .objects import ( + OpenAIResponseObject, + OpenAIDeleteResponseObject, + OpenAIResponseObjectStreamResponseCreated, + OpenAIResponseObjectStreamResponseInProgress, + OpenAIResponseObjectStreamResponseCompleted, + OpenAIResponseObjectStreamResponseIncomplete, + OpenAIResponseObjectStreamResponseFailed, + OpenAIResponseObjectStreamResponseOutputItemAdded, + OpenAIResponseObjectStreamResponseOutputItemDone, + OpenAIResponseObjectStreamResponseOutputTextDelta, + OpenAIResponseObjectStreamResponseOutputTextDone, + OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta, + OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone, + OpenAIResponseObjectStreamResponseWebSearchCallInProgress, + OpenAIResponseObjectStreamResponseWebSearchCallSearching, + OpenAIResponseObjectStreamResponseWebSearchCallCompleted, + OpenAIResponseObjectStreamResponseMcpListToolsInProgress, + OpenAIResponseObjectStreamResponseMcpListToolsFailed, + OpenAIResponseObjectStreamResponseMcpListToolsCompleted, + OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta, + OpenAIResponseObjectStreamResponseMcpCallArgumentsDone, + OpenAIResponseObjectStreamResponseMcpCallInProgress, + OpenAIResponseObjectStreamResponseMcpCallFailed, + OpenAIResponseObjectStreamResponseMcpCallCompleted, + 
OpenAIResponseObjectStreamResponseContentPartAdded, + OpenAIResponseObjectStreamResponseContentPartDone, + OpenAIResponseObjectStream, + OpenAIResponseObjectWithInput, + ListOpenAIResponseObject, +) + +from .outputs import ( + OpenAIResponseOutput, + OpenAIResponseTextFormat, + OpenAIResponseText, + OpenAIResponseContentPartOutputText, + OpenAIResponseContentPartRefusal, + OpenAIResponseContentPartReasoningText, + OpenAIResponseContentPart, +) + +from .tool_calls import ( + OpenAIResponseOutputMessageWebSearchToolCall, + OpenAIResponseOutputMessageFileSearchToolCallResults, + OpenAIResponseOutputMessageFileSearchToolCall, + OpenAIResponseOutputMessageFunctionToolCall, + OpenAIResponseOutputMessageMCPCall, + MCPListToolsTool, + OpenAIResponseOutputMessageMCPListTools, + OpenAIResponseMCPApprovalRequest, + OpenAIResponseMCPApprovalResponse, + OpenAIResponseInputFunctionToolCallOutput, +) + +from .tools import ( + WebSearchToolTypes, + OpenAIResponseInputToolWebSearch, + OpenAIResponseInputToolFunction, + OpenAIResponseInputToolFileSearch, + ApprovalFilter, + AllowedToolsFilter, + OpenAIResponseInputToolMCP, + OpenAIResponseInputTool, + OpenAIResponseToolMCP, + OpenAIResponseTool, +) + +from .usage import ( + OpenAIResponseUsageOutputTokensDetails, + OpenAIResponseUsageInputTokensDetails, + OpenAIResponseUsage, +) + +__all__ = [ + 'OpenAIResponseError', + 'OpenAIResponseInput', + 'ListOpenAIResponseInputItem', + 'OpenAIResponseInputMessageContentText', + 'OpenAIResponseInputMessageContentImage', + 'OpenAIResponseInputMessageContent', + 'OpenAIResponseAnnotationFileCitation', + 'OpenAIResponseAnnotationCitation', + 'OpenAIResponseAnnotationContainerFileCitation', + 'OpenAIResponseAnnotationFilePath', + 'OpenAIResponseAnnotations', + 'OpenAIResponseOutputMessageContentOutputText', + 'OpenAIResponseOutputMessageContent', + 'OpenAIResponseMessage', + 'OpenAIResponseObject', + 'OpenAIDeleteResponseObject', + 'OpenAIResponseObjectStreamResponseCreated', + 'OpenAIResponseObjectStreamResponseInProgress', + 'OpenAIResponseObjectStreamResponseCompleted', + 'OpenAIResponseObjectStreamResponseIncomplete', + 'OpenAIResponseObjectStreamResponseFailed', + 'OpenAIResponseObjectStreamResponseOutputItemAdded', + 'OpenAIResponseObjectStreamResponseOutputItemDone', + 'OpenAIResponseObjectStreamResponseOutputTextDelta', + 'OpenAIResponseObjectStreamResponseOutputTextDone', + 'OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta', + 'OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone', + 'OpenAIResponseObjectStreamResponseWebSearchCallInProgress', + 'OpenAIResponseObjectStreamResponseWebSearchCallSearching', + 'OpenAIResponseObjectStreamResponseWebSearchCallCompleted', + 'OpenAIResponseObjectStreamResponseMcpListToolsInProgress', + 'OpenAIResponseObjectStreamResponseMcpListToolsFailed', + 'OpenAIResponseObjectStreamResponseMcpListToolsCompleted', + 'OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta', + 'OpenAIResponseObjectStreamResponseMcpCallArgumentsDone', + 'OpenAIResponseObjectStreamResponseMcpCallInProgress', + 'OpenAIResponseObjectStreamResponseMcpCallFailed', + 'OpenAIResponseObjectStreamResponseMcpCallCompleted', + 'OpenAIResponseObjectStreamResponseContentPartAdded', + 'OpenAIResponseObjectStreamResponseContentPartDone', + 'OpenAIResponseObjectStream', + 'OpenAIResponseObjectWithInput', + 'ListOpenAIResponseObject', + 'OpenAIResponseOutput', + 'OpenAIResponseTextFormat', + 'OpenAIResponseText', + 'OpenAIResponseContentPartOutputText', + 'OpenAIResponseContentPartRefusal', + 
'OpenAIResponseContentPartReasoningText', + 'OpenAIResponseContentPart', + 'OpenAIResponseOutputMessageWebSearchToolCall', + 'OpenAIResponseOutputMessageFileSearchToolCallResults', + 'OpenAIResponseOutputMessageFileSearchToolCall', + 'OpenAIResponseOutputMessageFunctionToolCall', + 'OpenAIResponseOutputMessageMCPCall', + 'MCPListToolsTool', + 'OpenAIResponseOutputMessageMCPListTools', + 'OpenAIResponseMCPApprovalRequest', + 'OpenAIResponseMCPApprovalResponse', + 'OpenAIResponseInputFunctionToolCallOutput', + 'WebSearchToolTypes', + 'OpenAIResponseInputToolWebSearch', + 'OpenAIResponseInputToolFunction', + 'OpenAIResponseInputToolFileSearch', + 'ApprovalFilter', + 'AllowedToolsFilter', + 'OpenAIResponseInputToolMCP', + 'OpenAIResponseInputTool', + 'OpenAIResponseToolMCP', + 'OpenAIResponseTool', + 'OpenAIResponseUsageOutputTokensDetails', + 'OpenAIResponseUsageInputTokensDetails', + 'OpenAIResponseUsage', +] diff --git a/llama_stack/apis/agents/openai_responses/errors.py b/llama_stack/apis/agents/openai_responses/errors.py new file mode 100644 index 0000000000..108767dd89 --- /dev/null +++ b/llama_stack/apis/agents/openai_responses/errors.py @@ -0,0 +1,18 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. +from pydantic import BaseModel +from llama_stack.schema_utils import json_schema_type, register_schema + +@json_schema_type +class OpenAIResponseError(BaseModel): + """Error details for failed OpenAI response requests. + + :param code: Error code identifying the type of failure + :param message: Human-readable error message describing the failure + """ + + code: str + message: str diff --git a/llama_stack/apis/agents/openai_responses/inputs.py b/llama_stack/apis/agents/openai_responses/inputs.py new file mode 100644 index 0000000000..ef04b16ec7 --- /dev/null +++ b/llama_stack/apis/agents/openai_responses/inputs.py @@ -0,0 +1,38 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. +from typing import Annotated +from pydantic import BaseModel, Field +from typing_extensions import Literal +from llama_stack.schema_utils import json_schema_type, register_schema +from .messages import OpenAIResponseMessage +from .tool_calls import OpenAIResponseInputFunctionToolCallOutput, OpenAIResponseMCPApprovalRequest, OpenAIResponseMCPApprovalResponse, OpenAIResponseOutputMessageFileSearchToolCall, OpenAIResponseOutputMessageFunctionToolCall, OpenAIResponseOutputMessageWebSearchToolCall + +OpenAIResponseInput = Annotated[ + # Responses API allows output messages to be passed in as input + OpenAIResponseOutputMessageWebSearchToolCall + | OpenAIResponseOutputMessageFileSearchToolCall + | OpenAIResponseOutputMessageFunctionToolCall + | OpenAIResponseInputFunctionToolCallOutput + | OpenAIResponseMCPApprovalRequest + | OpenAIResponseMCPApprovalResponse + | + # Fallback to the generic message type as a last resort + OpenAIResponseMessage, + Field(union_mode="left_to_right"), +] + +register_schema(OpenAIResponseInput, name="OpenAIResponseInput") + + +class ListOpenAIResponseInputItem(BaseModel): + """List container for OpenAI response input items. 
+ + :param data: List of input items + :param object: Object type identifier, always "list" + """ + + data: list[OpenAIResponseInput] + object: Literal["list"] = "list" diff --git a/llama_stack/apis/agents/openai_responses/messages.py b/llama_stack/apis/agents/openai_responses/messages.py new file mode 100644 index 0000000000..fc34c34fec --- /dev/null +++ b/llama_stack/apis/agents/openai_responses/messages.py @@ -0,0 +1,138 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. +from typing import Annotated +from pydantic import BaseModel, Field +from typing_extensions import Literal +from llama_stack.schema_utils import json_schema_type, register_schema + +@json_schema_type +class OpenAIResponseInputMessageContentText(BaseModel): + """Text content for input messages in OpenAI response format. + + :param text: The text content of the input message + :param type: Content type identifier, always "input_text" + """ + + text: str + type: Literal["input_text"] = "input_text" + + +@json_schema_type +class OpenAIResponseInputMessageContentImage(BaseModel): + """Image content for input messages in OpenAI response format. + + :param detail: Level of detail for image processing, can be "low", "high", or "auto" + :param type: Content type identifier, always "input_image" + :param image_url: (Optional) URL of the image content + """ + + detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto" + type: Literal["input_image"] = "input_image" + # TODO: handle file_id + image_url: str | None = None + + +OpenAIResponseInputMessageContent = Annotated[ + OpenAIResponseInputMessageContentText | OpenAIResponseInputMessageContentImage, + Field(discriminator="type"), +] + +register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent") + + +@json_schema_type +class OpenAIResponseAnnotationFileCitation(BaseModel): + """File citation annotation for referencing specific files in response content. + + :param type: Annotation type identifier, always "file_citation" + :param file_id: Unique identifier of the referenced file + :param filename: Name of the referenced file + :param index: Position index of the citation within the content + """ + + type: Literal["file_citation"] = "file_citation" + file_id: str + filename: str + index: int + + +@json_schema_type +class OpenAIResponseAnnotationCitation(BaseModel): + """URL citation annotation for referencing external web resources. 
+
+    :param type: Annotation type identifier, always "url_citation"
+    :param end_index: End position of the citation span in the content
+    :param start_index: Start position of the citation span in the content
+    :param title: Title of the referenced web resource
+    :param url: URL of the referenced web resource
+    """
+
+    type: Literal["url_citation"] = "url_citation"
+    end_index: int
+    start_index: int
+    title: str
+    url: str
+
+
+@json_schema_type
+class OpenAIResponseAnnotationContainerFileCitation(BaseModel):
+    """Citation annotation referencing a file within a container.
+
+    :param type: Annotation type identifier, always "container_file_citation"
+    :param container_id: Unique identifier of the container holding the file
+    :param end_index: End position of the citation span in the content
+    :param file_id: Unique identifier of the referenced file
+    :param filename: Name of the referenced file
+    :param start_index: Start position of the citation span in the content
+    """
+
+    type: Literal["container_file_citation"] = "container_file_citation"
+    container_id: str
+    end_index: int
+    file_id: str
+    filename: str
+    start_index: int
+
+
+@json_schema_type
+class OpenAIResponseAnnotationFilePath(BaseModel):
+    """File path annotation referencing a file generated during the response.
+
+    :param type: Annotation type identifier, always "file_path"
+    :param file_id: Unique identifier of the referenced file
+    :param index: Position index of the annotation within the content
+    """
+
+    type: Literal["file_path"] = "file_path"
+    file_id: str
+    index: int
+
+
+OpenAIResponseAnnotations = Annotated[
+    OpenAIResponseAnnotationFileCitation
+    | OpenAIResponseAnnotationCitation
+    | OpenAIResponseAnnotationContainerFileCitation
+    | OpenAIResponseAnnotationFilePath,
+    Field(discriminator="type"),
+]
+
+register_schema(OpenAIResponseAnnotations, name="OpenAIResponseAnnotations")
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageContentOutputText(BaseModel):
+    """Text content for output messages in OpenAI response format.
+
+    :param text: The text content of the output message
+    :param type: Content type identifier, always "output_text"
+    :param annotations: Structured annotations associated with the text
+    """
+
+    text: str
+    type: Literal["output_text"] = "output_text"
+    annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list)
+
+
+OpenAIResponseOutputMessageContent = Annotated[
+    OpenAIResponseOutputMessageContentOutputText,
+    Field(discriminator="type"),
+]
+
+register_schema(OpenAIResponseOutputMessageContent, name="OpenAIResponseOutputMessageContent")
+
+
+@json_schema_type
+class OpenAIResponseMessage(BaseModel):
+    """
+    Corresponds to the various Message types in the Responses API.
+    They are all under one type because the Responses API gives them all
+    the same "type" value, and there is no way to tell them apart in certain
+    scenarios.
+    """
+
+    content: str | list[OpenAIResponseInputMessageContent] | list[OpenAIResponseOutputMessageContent]
+    role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"]
+    type: Literal["message"] = "message"
+
+    # The fields below are not used in all scenarios, but are required in others.
+    id: str | None = None
+    status: str | None = None
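+
+
+# A minimal usage sketch (assumed client code, not part of the generated
+# schema): the same message model covers input and output messages, and
+# content may be a plain string or a list of typed content parts.
+#
+#   msg = OpenAIResponseMessage(
+#       role="user",
+#       content=[OpenAIResponseInputMessageContentText(text="Hello!")],
+#   )
diff --git a/llama_stack/apis/agents/openai_responses/objects.py b/llama_stack/apis/agents/openai_responses/objects.py
new file mode 100644
index 0000000000..a002f7e8ab
--- /dev/null
+++ b/llama_stack/apis/agents/openai_responses/objects.py
@@ -0,0 +1,468 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from typing import Annotated
+from pydantic import BaseModel, Field
+from typing_extensions import Literal
+from llama_stack.schema_utils import json_schema_type, register_schema
+from .errors import OpenAIResponseError
+from .inputs import OpenAIResponseInput
+from .outputs import OpenAIResponseContentPart, OpenAIResponseOutput, OpenAIResponseText, OpenAIResponseTextFormat
+from .tools import OpenAIResponseTool
+from .usage import OpenAIResponseUsage
+
+@json_schema_type
+class OpenAIResponseObject(BaseModel):
+    """Complete OpenAI response object containing generation results and metadata.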
+ + :param created_at: Unix timestamp when the response was created + :param error: (Optional) Error details if the response generation failed + :param id: Unique identifier for this response + :param model: Model identifier used for generation + :param object: Object type identifier, always "response" + :param output: List of generated output items (messages, tool calls, etc.) + :param parallel_tool_calls: Whether tool calls can be executed in parallel + :param previous_response_id: (Optional) ID of the previous response in a conversation + :param status: Current status of the response generation + :param temperature: (Optional) Sampling temperature used for generation + :param text: Text formatting configuration for the response + :param top_p: (Optional) Nucleus sampling parameter used for generation + :param tools: (Optional) An array of tools the model may call while generating a response. + :param truncation: (Optional) Truncation strategy applied to the response + :param usage: (Optional) Token usage information for the response + """ + + created_at: int + error: OpenAIResponseError | None = None + id: str + model: str + object: Literal["response"] = "response" + output: list[OpenAIResponseOutput] + parallel_tool_calls: bool = False + previous_response_id: str | None = None + status: str + temperature: float | None = None + # Default to text format to avoid breaking the loading of old responses + # before the field was added. New responses will have this set always. + text: OpenAIResponseText = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")) + top_p: float | None = None + tools: list[OpenAIResponseTool] | None = None + truncation: str | None = None + usage: OpenAIResponseUsage | None = None + + +@json_schema_type +class OpenAIDeleteResponseObject(BaseModel): + """Response object confirming deletion of an OpenAI response. + + :param id: Unique identifier of the deleted response + :param object: Object type identifier, always "response" + :param deleted: Deletion confirmation flag, always True + """ + + id: str + object: Literal["response"] = "response" + deleted: bool = True + + +@json_schema_type +class OpenAIResponseObjectStreamResponseCreated(BaseModel): + """Streaming event indicating a new response has been created. + + :param response: The response object that was created + :param type: Event type identifier, always "response.created" + """ + + response: OpenAIResponseObject + type: Literal["response.created"] = "response.created" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseInProgress(BaseModel): + """Streaming event indicating the response remains in progress. + + :param response: Current response state while in progress + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.in_progress" + """ + + response: OpenAIResponseObject + sequence_number: int + type: Literal["response.in_progress"] = "response.in_progress" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseCompleted(BaseModel): + """Streaming event indicating a response has been completed. + + :param response: Completed response object + :param type: Event type identifier, always "response.completed" + """ + + response: OpenAIResponseObject + type: Literal["response.completed"] = "response.completed" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseIncomplete(BaseModel): + """Streaming event emitted when a response ends in an incomplete state. 
+ + :param response: Response object describing the incomplete state + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.incomplete" + """ + + response: OpenAIResponseObject + sequence_number: int + type: Literal["response.incomplete"] = "response.incomplete" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseFailed(BaseModel): + """Streaming event emitted when a response fails. + + :param response: Response object describing the failure + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.failed" + """ + + response: OpenAIResponseObject + sequence_number: int + type: Literal["response.failed"] = "response.failed" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel): + """Streaming event for when a new output item is added to the response. + + :param response_id: Unique identifier of the response containing this output + :param item: The output item that was added (message, tool call, etc.) + :param output_index: Index position of this item in the output list + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.output_item.added" + """ + + response_id: str + item: OpenAIResponseOutput + output_index: int + sequence_number: int + type: Literal["response.output_item.added"] = "response.output_item.added" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseOutputItemDone(BaseModel): + """Streaming event for when an output item is completed. + + :param response_id: Unique identifier of the response containing this output + :param item: The completed output item (message, tool call, etc.) + :param output_index: Index position of this item in the output list + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.output_item.done" + """ + + response_id: str + item: OpenAIResponseOutput + output_index: int + sequence_number: int + type: Literal["response.output_item.done"] = "response.output_item.done" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel): + """Streaming event for incremental text content updates. + + :param content_index: Index position within the text content + :param delta: Incremental text content being added + :param item_id: Unique identifier of the output item being updated + :param output_index: Index position of the item in the output list + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.output_text.delta" + """ + + content_index: int + delta: str + item_id: str + output_index: int + sequence_number: int + type: Literal["response.output_text.delta"] = "response.output_text.delta" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseOutputTextDone(BaseModel): + """Streaming event for when text output is completed. 
+
+    :param content_index: Index position within the text content
+    :param text: Final complete text content of the output item
+    :param item_id: Unique identifier of the completed output item
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.output_text.done"
+    """
+
+    content_index: int
+    text: str  # final text of the output item
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.output_text.done"] = "response.output_text.done"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(BaseModel):
+    """Streaming event for incremental function call argument updates.
+
+    :param delta: Incremental function call arguments being added
+    :param item_id: Unique identifier of the function call being updated
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.function_call_arguments.delta"
+    """
+
+    delta: str
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.function_call_arguments.delta"] = "response.function_call_arguments.delta"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone(BaseModel):
+    """Streaming event for when function call arguments are completed.
+
+    :param arguments: Final complete arguments JSON string for the function call
+    :param item_id: Unique identifier of the completed function call
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.function_call_arguments.done"
+    """
+
+    arguments: str  # final arguments of the function call
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.function_call_arguments.done"] = "response.function_call_arguments.done"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseWebSearchCallInProgress(BaseModel):
+    """Streaming event for web search calls in progress.
+
+    :param item_id: Unique identifier of the web search call
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.web_search_call.in_progress"
+    """
+
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.web_search_call.in_progress"] = "response.web_search_call.in_progress"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel):
+    """Streaming event for web search calls actively searching.
+
+    :param item_id: Unique identifier of the web search call
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.web_search_call.searching"
+    """
+
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.web_search_call.searching"] = "response.web_search_call.searching"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseWebSearchCallCompleted(BaseModel):
+    """Streaming event for completed web search calls.
+
+    :param item_id: Unique identifier of the completed web search call
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.web_search_call.completed"
+    """
+
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.web_search_call.completed"] = "response.web_search_call.completed"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpListToolsInProgress(BaseModel):
+    """Streaming event for MCP list tools operations in progress.
+
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_list_tools.in_progress"
+    """
+
+    sequence_number: int
+    type: Literal["response.mcp_list_tools.in_progress"] = "response.mcp_list_tools.in_progress"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpListToolsFailed(BaseModel):
+    """Streaming event for failed MCP list tools operations.
+
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_list_tools.failed"
+    """
+
+    sequence_number: int
+    type: Literal["response.mcp_list_tools.failed"] = "response.mcp_list_tools.failed"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpListToolsCompleted(BaseModel):
+    """Streaming event for completed MCP list tools operations.
+
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_list_tools.completed"
+    """
+
+    sequence_number: int
+    type: Literal["response.mcp_list_tools.completed"] = "response.mcp_list_tools.completed"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta(BaseModel):
+    """Streaming event for incremental MCP call argument updates.
+
+    :param delta: Incremental MCP call arguments being added
+    :param item_id: Unique identifier of the MCP call being updated
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_call.arguments.delta"
+    """
+
+    delta: str
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.mcp_call.arguments.delta"] = "response.mcp_call.arguments.delta"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpCallArgumentsDone(BaseModel):
+    """Streaming event for when MCP call arguments are completed.
+
+    :param arguments: Final complete arguments JSON string for the MCP call
+    :param item_id: Unique identifier of the completed MCP call
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_call.arguments.done"
+    """
+
+    arguments: str  # final arguments of the MCP call
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.mcp_call.arguments.done"] = "response.mcp_call.arguments.done"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpCallInProgress(BaseModel):
+    """Streaming event for MCP calls in progress.
+
+    :param item_id: Unique identifier of the MCP call
+    :param output_index: Index position of the item in the output list
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_call.in_progress"
+    """
+
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.mcp_call.in_progress"] = "response.mcp_call.in_progress"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpCallFailed(BaseModel):
+    """Streaming event for failed MCP calls.
+
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_call.failed"
+    """
+
+    sequence_number: int
+    type: Literal["response.mcp_call.failed"] = "response.mcp_call.failed"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseMcpCallCompleted(BaseModel):
+    """Streaming event for completed MCP calls.
+
+    :param sequence_number: Sequential number for ordering streaming events
+    :param type: Event type identifier, always "response.mcp_call.completed"
+    """
+
+    sequence_number: int
+    type: Literal["response.mcp_call.completed"] = "response.mcp_call.completed"
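+
+
+# A minimal sketch (assumed client code, not part of the generated schema):
+# pydantic routes a raw event payload to the matching event class via the
+# "type" discriminator on the OpenAIResponseObjectStream union defined below.
+#
+#   from pydantic import TypeAdapter
+#
+#   event = TypeAdapter(OpenAIResponseObjectStream).validate_python(
+#       {"type": "response.mcp_call.completed", "sequence_number": 7}
+#   )
+#   assert isinstance(event, OpenAIResponseObjectStreamResponseMcpCallCompleted)
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseContentPartAdded(BaseModel):
+    """Streaming event for when a new content part is added to a response item.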
+ + :param content_index: Index position of the part within the content array + :param response_id: Unique identifier of the response containing this content + :param item_id: Unique identifier of the output item containing this content part + :param output_index: Index position of the output item in the response + :param part: The content part that was added + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.content_part.added" + """ + + content_index: int + response_id: str + item_id: str + output_index: int + part: OpenAIResponseContentPart + sequence_number: int + type: Literal["response.content_part.added"] = "response.content_part.added" + + +@json_schema_type +class OpenAIResponseObjectStreamResponseContentPartDone(BaseModel): + """Streaming event for when a content part is completed. + + :param content_index: Index position of the part within the content array + :param response_id: Unique identifier of the response containing this content + :param item_id: Unique identifier of the output item containing this content part + :param output_index: Index position of the output item in the response + :param part: The completed content part + :param sequence_number: Sequential number for ordering streaming events + :param type: Event type identifier, always "response.content_part.done" + """ + + content_index: int + response_id: str + item_id: str + output_index: int + part: OpenAIResponseContentPart + sequence_number: int + type: Literal["response.content_part.done"] = "response.content_part.done" + + +OpenAIResponseObjectStream = Annotated[ + OpenAIResponseObjectStreamResponseCreated + | OpenAIResponseObjectStreamResponseInProgress + | OpenAIResponseObjectStreamResponseOutputItemAdded + | OpenAIResponseObjectStreamResponseOutputItemDone + | OpenAIResponseObjectStreamResponseOutputTextDelta + | OpenAIResponseObjectStreamResponseOutputTextDone + | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta + | OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone + | OpenAIResponseObjectStreamResponseWebSearchCallInProgress + | OpenAIResponseObjectStreamResponseWebSearchCallSearching + | OpenAIResponseObjectStreamResponseWebSearchCallCompleted + | OpenAIResponseObjectStreamResponseMcpListToolsInProgress + | OpenAIResponseObjectStreamResponseMcpListToolsFailed + | OpenAIResponseObjectStreamResponseMcpListToolsCompleted + | OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta + | OpenAIResponseObjectStreamResponseMcpCallArgumentsDone + | OpenAIResponseObjectStreamResponseMcpCallInProgress + | OpenAIResponseObjectStreamResponseMcpCallFailed + | OpenAIResponseObjectStreamResponseMcpCallCompleted + | OpenAIResponseObjectStreamResponseContentPartAdded + | OpenAIResponseObjectStreamResponseContentPartDone + | OpenAIResponseObjectStreamResponseIncomplete + | OpenAIResponseObjectStreamResponseFailed + | OpenAIResponseObjectStreamResponseCompleted, + Field(discriminator="type"), +] + +register_schema(OpenAIResponseObjectStream, name="OpenAIResponseObjectStream") + + +@json_schema_type +class OpenAIResponseObjectWithInput(OpenAIResponseObject): + """OpenAI response object extended with input context information. 
+ + :param input: List of input items that led to this response + """ + + input: list[OpenAIResponseInput] + + def to_response_object(self) -> OpenAIResponseObject: + """Convert to OpenAIResponseObject by excluding input field.""" + return OpenAIResponseObject(**{k: v for k, v in self.model_dump().items() if k != "input"}) + + +@json_schema_type +class ListOpenAIResponseObject(BaseModel): + """Paginated list of OpenAI response objects with navigation metadata. + + :param data: List of response objects with their input context + :param has_more: Whether there are more results available beyond this page + :param first_id: Identifier of the first item in this page + :param last_id: Identifier of the last item in this page + :param object: Object type identifier, always "list" + """ + + data: list[OpenAIResponseObjectWithInput] + has_more: bool + first_id: str + last_id: str + object: Literal["list"] = "list" diff --git a/llama_stack/apis/agents/openai_responses/outputs.py b/llama_stack/apis/agents/openai_responses/outputs.py new file mode 100644 index 0000000000..64ea78660e --- /dev/null +++ b/llama_stack/apis/agents/openai_responses/outputs.py @@ -0,0 +1,99 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. +from typing import Annotated, Any +from pydantic import BaseModel, Field +from typing_extensions import Literal, TypedDict +from llama_stack.schema_utils import json_schema_type, register_schema +from .messages import OpenAIResponseAnnotations, OpenAIResponseMessage +from .tool_calls import OpenAIResponseMCPApprovalRequest, OpenAIResponseOutputMessageFileSearchToolCall, OpenAIResponseOutputMessageFunctionToolCall, OpenAIResponseOutputMessageMCPCall, OpenAIResponseOutputMessageMCPListTools, OpenAIResponseOutputMessageWebSearchToolCall + +OpenAIResponseOutput = Annotated[ + OpenAIResponseMessage + | OpenAIResponseOutputMessageWebSearchToolCall + | OpenAIResponseOutputMessageFileSearchToolCall + | OpenAIResponseOutputMessageFunctionToolCall + | OpenAIResponseOutputMessageMCPCall + | OpenAIResponseOutputMessageMCPListTools + | OpenAIResponseMCPApprovalRequest, + Field(discriminator="type"), +] + +register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput") + + +class OpenAIResponseTextFormat(TypedDict, total=False): + """Configuration for Responses API text format. + + :param type: Must be "text", "json_schema", or "json_object" to identify the format type + :param name: The name of the response format. Only used for json_schema. + :param schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema. + :param description: (Optional) A description of the response format. Only used for json_schema. + :param strict: (Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema. + """ + + type: Literal["text"] | Literal["json_schema"] | Literal["json_object"] + name: str | None + schema: dict[str, Any] | None + description: str | None + strict: bool | None + + +@json_schema_type +class OpenAIResponseText(BaseModel): + """Text response configuration for OpenAI responses. 
+ + :param format: (Optional) Text format configuration specifying output format requirements + """ + + format: OpenAIResponseTextFormat | None = None + + +@json_schema_type +class OpenAIResponseContentPartOutputText(BaseModel): + """Text content within a streamed response part. + + :param type: Content part type identifier, always "output_text" + :param text: Text emitted for this content part + :param annotations: Structured annotations associated with the text + :param logprobs: (Optional) Token log probability details + """ + + type: Literal["output_text"] = "output_text" + text: str + annotations: list[OpenAIResponseAnnotations] = Field(default_factory=list) + logprobs: list[dict[str, Any]] | None = None + + +@json_schema_type +class OpenAIResponseContentPartRefusal(BaseModel): + """Refusal content within a streamed response part. + + :param type: Content part type identifier, always "refusal" + :param refusal: Refusal text supplied by the model + """ + + type: Literal["refusal"] = "refusal" + refusal: str + + +@json_schema_type +class OpenAIResponseContentPartReasoningText(BaseModel): + """Reasoning text emitted as part of a streamed response. + + :param type: Content part type identifier, always "reasoning_text" + :param text: Reasoning text supplied by the model + """ + + type: Literal["reasoning_text"] = "reasoning_text" + text: str + + +OpenAIResponseContentPart = Annotated[ + OpenAIResponseContentPartOutputText | OpenAIResponseContentPartRefusal | OpenAIResponseContentPartReasoningText, + Field(discriminator="type"), +] + +register_schema(OpenAIResponseContentPart, name="OpenAIResponseContentPart") diff --git a/llama_stack/apis/agents/openai_responses/tool_calls.py b/llama_stack/apis/agents/openai_responses/tool_calls.py new file mode 100644 index 0000000000..b267dad17e --- /dev/null +++ b/llama_stack/apis/agents/openai_responses/tool_calls.py @@ -0,0 +1,167 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. +from typing import Any +from pydantic import BaseModel +from typing_extensions import Literal +from llama_stack.schema_utils import json_schema_type, register_schema + +@json_schema_type +class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel): + """Web search tool call output message for OpenAI responses. + + :param id: Unique identifier for this tool call + :param status: Current status of the web search operation + :param type: Tool call type identifier, always "web_search_call" + """ + + id: str + status: str + type: Literal["web_search_call"] = "web_search_call" + + +class OpenAIResponseOutputMessageFileSearchToolCallResults(BaseModel): + """Search results returned by the file search operation. + + :param attributes: (Optional) Key-value attributes associated with the file + :param file_id: Unique identifier of the file containing the result + :param filename: Name of the file containing the result + :param score: Relevance score for this search result (between 0 and 1) + :param text: Text content of the search result + """ + + attributes: dict[str, Any] + file_id: str + filename: str + score: float + text: str + + +@json_schema_type +class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel): + """File search tool call output message for OpenAI responses. 
+
+    :param id: Unique identifier for this tool call
+    :param queries: List of search queries executed
+    :param status: Current status of the file search operation
+    :param type: Tool call type identifier, always "file_search_call"
+    :param results: (Optional) Search results returned by the file search operation
+    """
+
+    id: str
+    queries: list[str]
+    status: str
+    type: Literal["file_search_call"] = "file_search_call"
+    results: list[OpenAIResponseOutputMessageFileSearchToolCallResults] | None = None
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+    """Function tool call output message for OpenAI responses.
+
+    :param call_id: Unique identifier for the function call
+    :param name: Name of the function being called
+    :param arguments: JSON string containing the function arguments
+    :param type: Tool call type identifier, always "function_call"
+    :param id: (Optional) Additional identifier for the tool call
+    :param status: (Optional) Current status of the function call execution
+    """
+
+    call_id: str
+    name: str
+    arguments: str
+    type: Literal["function_call"] = "function_call"
+    id: str | None = None
+    status: str | None = None
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageMCPCall(BaseModel):
+    """Model Context Protocol (MCP) call output message for OpenAI responses.
+
+    :param id: Unique identifier for this MCP call
+    :param type: Tool call type identifier, always "mcp_call"
+    :param arguments: JSON string containing the MCP call arguments
+    :param name: Name of the MCP method being called
+    :param server_label: Label identifying the MCP server handling the call
+    :param error: (Optional) Error message if the MCP call failed
+    :param output: (Optional) Output result from the successful MCP call
+    """
+
+    id: str
+    type: Literal["mcp_call"] = "mcp_call"
+    arguments: str
+    name: str
+    server_label: str
+    error: str | None = None
+    output: str | None = None
+
+
+class MCPListToolsTool(BaseModel):
+    """Tool definition returned by MCP list tools operation.
+
+    :param input_schema: JSON schema defining the tool's input parameters
+    :param name: Name of the tool
+    :param description: (Optional) Description of what the tool does
+    """
+
+    input_schema: dict[str, Any]
+    name: str
+    description: str | None = None
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageMCPListTools(BaseModel):
+    """MCP list tools output message containing available tools from an MCP server.
+
+    :param id: Unique identifier for this MCP list tools operation
+    :param type: Tool call type identifier, always "mcp_list_tools"
+    :param server_label: Label identifying the MCP server providing the tools
+    :param tools: List of available tools provided by the MCP server
+    """
+
+    id: str
+    type: Literal["mcp_list_tools"] = "mcp_list_tools"
+    server_label: str
+    tools: list[MCPListToolsTool]
+
+
+@json_schema_type
+class OpenAIResponseMCPApprovalRequest(BaseModel):
+    """
+    A request for human approval of a tool invocation.
+
+    :param arguments: JSON string containing the tool call arguments
+    :param id: Unique identifier for this approval request
+    :param name: Name of the tool awaiting approval
+    :param server_label: Label identifying the MCP server that issued the request
+    :param type: Object type identifier, always "mcp_approval_request"
+    """
+
+    arguments: str
+    id: str
+    name: str
+    server_label: str
+    type: Literal["mcp_approval_request"] = "mcp_approval_request"
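+
+
+# A minimal sketch (assumed client code, not part of the generated schema):
+# an approval request is answered with the response type defined below. The
+# request ID is illustrative.
+#
+#   decision = OpenAIResponseMCPApprovalResponse(
+#       approval_request_id="apr_123",
+#       approve=True,
+#       reason="reviewed arguments",
+#   )
+
+
+@json_schema_type
+class OpenAIResponseMCPApprovalResponse(BaseModel):
+    """
+    A response to an MCP approval request.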
+
+    :param approval_request_id: Unique identifier of the approval request being answered
+    :param approve: Whether the tool invocation is approved
+    :param type: Object type identifier, always "mcp_approval_response"
+    :param id: (Optional) Unique identifier for this approval response
+    :param reason: (Optional) Reason for the approval decision
+    """
+
+    approval_request_id: str
+    approve: bool
+    type: Literal["mcp_approval_response"] = "mcp_approval_response"
+    id: str | None = None
+    reason: str | None = None
+
+
+@json_schema_type
+class OpenAIResponseInputFunctionToolCallOutput(BaseModel):
+    """
+    This represents the output of a function call that gets passed back to the model.
+    """
+
+    call_id: str
+    output: str
+    type: Literal["function_call_output"] = "function_call_output"
+    id: str | None = None
+    status: str | None = None
diff --git a/llama_stack/apis/agents/openai_responses/tools.py b/llama_stack/apis/agents/openai_responses/tools.py
new file mode 100644
index 0000000000..e58e3b32e5
--- /dev/null
+++ b/llama_stack/apis/agents/openai_responses/tools.py
@@ -0,0 +1,140 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from typing import Annotated, Any
+from pydantic import BaseModel, Field
+from typing_extensions import Literal
+from llama_stack.schema_utils import json_schema_type, register_schema

+WebSearchToolTypes = ["web_search", "web_search_preview", "web_search_preview_2025_03_11"]
+
+
+@json_schema_type
+class OpenAIResponseInputToolWebSearch(BaseModel):
+    """Web search tool configuration for OpenAI response inputs.
+
+    :param type: Web search tool type variant to use
+    :param search_context_size: (Optional) Size of search context, must be "low", "medium", or "high"
+    """
+
+    # Must match values of WebSearchToolTypes above
+    type: Literal["web_search"] | Literal["web_search_preview"] | Literal["web_search_preview_2025_03_11"] = (
+        "web_search"
+    )
+    # TODO: actually use search_context_size somewhere...
+    search_context_size: str | None = Field(default="medium", pattern="^(low|medium|high)$")
+
+
+@json_schema_type
+class OpenAIResponseInputToolFunction(BaseModel):
+    """Function tool configuration for OpenAI response inputs.
+
+    :param type: Tool type identifier, always "function"
+    :param name: Name of the function that can be called
+    :param description: (Optional) Description of what the function does
+    :param parameters: (Optional) JSON schema defining the function's parameters
+    :param strict: (Optional) Whether to enforce strict parameter validation
+    """
+
+    type: Literal["function"] = "function"
+    name: str
+    description: str | None = None
+    parameters: dict[str, Any] | None
+    strict: bool | None = None
+
+
+# Defined here so the forward reference below resolves; this mirrors the
+# definition from the original monolithic openai_responses module.
+@json_schema_type
+class FileSearchRankingOptions(BaseModel):
+    """Options for ranking and filtering search results.
+
+    :param ranker: (Optional) Name of the ranking algorithm to use
+    :param score_threshold: (Optional) Minimum relevance score threshold for results
+    """
+
+    ranker: str | None = None
+    score_threshold: float | None = Field(default=0.0, ge=0.0, le=1.0)
+
+
+@json_schema_type
+class OpenAIResponseInputToolFileSearch(BaseModel):
+    """File search tool configuration for OpenAI response inputs.
+
+    :param type: Tool type identifier, always "file_search"
+    :param vector_store_ids: List of vector store identifiers to search within
+    :param filters: (Optional) Additional filters to apply to the search
+    :param max_num_results: (Optional) Maximum number of search results to return (1-50)
+    :param ranking_options: (Optional) Options for ranking and scoring search results
+    """
+
+    type: Literal["file_search"] = "file_search"
+    vector_store_ids: list[str]
+    filters: dict[str, Any] | None = None
+    max_num_results: int | None = Field(default=10, ge=1, le=50)
+    ranking_options: FileSearchRankingOptions | None = None
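+
+
+# A minimal usage sketch (assumed client code, not part of the generated
+# schema): constructing a file_search tool input. The vector store ID is
+# illustrative.
+#
+#   tool = OpenAIResponseInputToolFileSearch(
+#       vector_store_ids=["vs_abc123"],
+#       max_num_results=5,
+#       ranking_options=FileSearchRankingOptions(score_threshold=0.5),
+#   )
+
+
+class ApprovalFilter(BaseModel):
+    """Filter configuration for MCP tool approval requirements.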
+
+    :param always: (Optional) List of tool names that always require approval
+    :param never: (Optional) List of tool names that never require approval
+    """
+
+    always: list[str] | None = None
+    never: list[str] | None = None
+
+
+class AllowedToolsFilter(BaseModel):
+    """Filter configuration for restricting which MCP tools can be used.
+
+    :param tool_names: (Optional) List of specific tool names that are allowed
+    """
+
+    tool_names: list[str] | None = None
+
+
+@json_schema_type
+class OpenAIResponseInputToolMCP(BaseModel):
+    """Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
+
+    :param type: Tool type identifier, always "mcp"
+    :param server_label: Label to identify this MCP server
+    :param server_url: URL endpoint of the MCP server
+    :param headers: (Optional) HTTP headers to include when connecting to the server
+    :param require_approval: Approval requirement for tool calls ("always", "never", or filter)
+    :param allowed_tools: (Optional) Restriction on which tools can be used from this server
+    """
+
+    type: Literal["mcp"] = "mcp"
+    server_label: str
+    server_url: str
+    headers: dict[str, Any] | None = None
+
+    require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
+    allowed_tools: list[str] | AllowedToolsFilter | None = None
+
+
+OpenAIResponseInputTool = Annotated[
+    OpenAIResponseInputToolWebSearch
+    | OpenAIResponseInputToolFileSearch
+    | OpenAIResponseInputToolFunction
+    | OpenAIResponseInputToolMCP,
+    Field(discriminator="type"),
+]
+
+register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
+
+
+@json_schema_type
+class OpenAIResponseToolMCP(BaseModel):
+    """Model Context Protocol (MCP) tool configuration for OpenAI response object.
+
+    :param type: Tool type identifier, always "mcp"
+    :param server_label: Label to identify this MCP server
+    :param allowed_tools: (Optional) Restriction on which tools can be used from this server
+    """
+
+    type: Literal["mcp"] = "mcp"
+    server_label: str
+    allowed_tools: list[str] | AllowedToolsFilter | None = None
+
+
+OpenAIResponseTool = Annotated[
+    OpenAIResponseInputToolWebSearch
+    | OpenAIResponseInputToolFileSearch
+    | OpenAIResponseInputToolFunction
+    | OpenAIResponseToolMCP,  # The only type that differs from the inputs union is the MCP tool
+    Field(discriminator="type"),
+]
+
+register_schema(OpenAIResponseTool, name="OpenAIResponseTool")
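+
+
+# A minimal sketch (assumed client code, not part of the generated schema):
+# the response-side union reuses the input tool models except for MCP, which
+# deliberately drops connection details such as server_url and headers.
+# Values are illustrative.
+#
+#   mcp_input = OpenAIResponseInputToolMCP(server_label="files", server_url="http://localhost:3000/mcp")
+#   mcp_in_response = OpenAIResponseToolMCP(server_label=mcp_input.server_label)
diff --git a/llama_stack/apis/agents/openai_responses/usage.py b/llama_stack/apis/agents/openai_responses/usage.py
new file mode 100644
index 0000000000..8d3b779ec4
--- /dev/null
+++ b/llama_stack/apis/agents/openai_responses/usage.py
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from pydantic import BaseModel
+from llama_stack.schema_utils import json_schema_type, register_schema
+
+class OpenAIResponseUsageOutputTokensDetails(BaseModel):
+    """Token details for output tokens in OpenAI response usage.
+
+    :param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)
+    """
+
+    reasoning_tokens: int | None = None
+
+
+class OpenAIResponseUsageInputTokensDetails(BaseModel):
+    """Token details for input tokens in OpenAI response usage.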
+ + +class OpenAIResponseUsageInputTokensDetails(BaseModel): + """Token details for input tokens in OpenAI response usage. + + :param cached_tokens: Number of tokens retrieved from cache + """ + + cached_tokens: int | None = None + + +@json_schema_type +class OpenAIResponseUsage(BaseModel): + """Usage information for an OpenAI response. + + :param input_tokens: Number of tokens in the input + :param output_tokens: Number of tokens in the output + :param total_tokens: Total tokens used (input + output) + :param input_tokens_details: Detailed breakdown of input token usage + :param output_tokens_details: Detailed breakdown of output token usage + """ + + input_tokens: int + output_tokens: int + total_tokens: int + input_tokens_details: OpenAIResponseUsageInputTokensDetails | None = None + output_tokens_details: OpenAIResponseUsageOutputTokensDetails | None = None diff --git a/pyproject.toml b/pyproject.toml index 81997c249a..4d69c30532 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,6 +75,7 @@ dev = [ "types-setuptools", "pre-commit", "ruamel.yaml", # needed for openapi generator + "libcst>=1.5.0", # responses schema generator ] # These are the dependencies required for running unit tests. unit = [ diff --git a/scripts/generate_openai_responses.py b/scripts/generate_openai_responses.py new file mode 100644 index 0000000000..d7db6e73c0 --- /dev/null +++ b/scripts/generate_openai_responses.py @@ -0,0 +1,650 @@ +"""Generate llama_stack.apis.agents.openai_responses package from openai-python types. + +This script consumes the typed models that ship with the OpenAI Python SDK and +transforms them into the Pydantic ``BaseModel`` / ``TypedDict`` definitions that +the llama-stack project expects. The goal is to keep our local schema file in +sync with OpenAI's source of truth without having to manually copy/paste large +chunks of code every time the upstream API evolves. + +The generator walks the dependency graph that starts at +``ResponseCreateParams`` (the request payload) and ``Response`` (the response +object) and recursively captures every referenced type. Each definition is then +rewritten so that it: + +* is assigned to a focused module within ``llama_stack.apis.agents.openai_responses`` +* uses ``pydantic.BaseModel`` instead of OpenAI's internal ``BaseModel`` +* prefixes names with ``OpenAI`` so they remain isolated from our own models + +Running the script overwrites the ``openai_responses`` package found at +``llama_stack/apis/agents/openai_responses``. Use ``--output`` to write the +generated package somewhere else (for example when capturing review snapshots). +""" + +import argparse +import ast +from collections import OrderedDict +from dataclasses import dataclass +import importlib +import pathlib +import shutil
+from typing import Iterable + +import libcst as cst + +ROOT = pathlib.Path(__file__).resolve().parents[1] +OUTPUT_DIR = ROOT / "llama_stack" / "apis" / "agents" / "openai_responses" +MODULE_HEADER = ( + "# NOTE: This file is auto-generated by scripts/generate_openai_responses.py\n" + "# Do not edit by hand.\n" + "#\n" + "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n" + "# All rights reserved.\n" + "#\n" + "# This source code is licensed under the terms described in the LICENSE file in\n" + "# the root directory of this source tree.\n" +) + +TARGET_PREFIX = "OpenAI" +ROOTS = { + ("openai.types.responses.response_create_params", "ResponseCreateParams"), + ("openai.types.responses.response", "Response"), +} +TYPING_NAMES = { + "Annotated", + "Any", + "Callable", + "Dict", + "Iterable", + "List", + "Mapping", + "Optional", + "Sequence", + "Tuple", + "Union", +} +TYPING_EXT_NAMES = { + "Literal", + "NotRequired", + "Required", + "TypeAlias", + "TypedDict", +} +GROUP_ORDER = [ + "errors", + "messages", + "tool_calls", + "tools", + "outputs", + "usage", + "objects", + "inputs", + "requests", + "configs", + "shared", +] +SKIP_NAMES = { + "False", + "True", + "None", + "Field", + "BaseModel", + "json_schema_type", + "register_schema", +} +SKIP_NAMES.update(TYPING_NAMES) +SKIP_NAMES.update(TYPING_EXT_NAMES) + + +@dataclass(frozen=True) +class DefinitionKey: + module: str + name: str + + +@dataclass +class ModuleData: + module: cst.Module + imports: dict[str, tuple[str, str]] + local_defs: dict[str, cst.CSTNode] + py_defs: dict[str, ast.AST] + + +@dataclass +class Definition: + key: DefinitionKey + node: cst.CSTNode + register: bool + decorate: bool + py_node: ast.AST | None + references: set[str] + + +@dataclass +class RenderedDefinition: + name: str + code: str + register: bool + decorate: bool + dependencies: set[str] + typing_names: set[str] + typing_ext_names: set[str] + pydantic_names: set[str] + + +_MODULE_CACHE: dict[str, ModuleData] = {} + + +def resolve_relative(module_path: str, level: int, target: str | None) -> str: + if level == 0: + return target or "" + parts = module_path.split(".") + base = parts[: len(parts) - level] + if target: + base.extend(target.split(".")) + return ".".join(base) + + +def load_module_data(module_path: str) -> ModuleData: + cached = _MODULE_CACHE.get(module_path) + if cached: + return cached + + module = importlib.import_module(module_path) + file = pathlib.Path(module.__file__).resolve() + source = file.read_text() + cst_module = cst.parse_module(source) + py_module = ast.parse(source) + + imports: dict[str, tuple[str, str]] = {} + module_prefix = module_path + for node in py_module.body: + if isinstance(node, ast.ImportFrom): + if node.module == "__future__": + continue + module_name = resolve_relative(module_prefix, node.level or 0, node.module) + if not module_name.startswith("openai.types"): + continue + for alias in node.names: + if alias.name == "*": + continue + asname = alias.asname or alias.name.split(".")[-1] + original = alias.name.split(".")[-1] + imports[asname] = (module_name, original) + + local_defs: dict[str, cst.CSTNode] = {} + py_defs: dict[str, ast.AST] = {} + for stmt, py_stmt in zip(cst_module.body, py_module.body): + if isinstance(stmt, cst.ClassDef) and isinstance(py_stmt, ast.ClassDef): + local_defs[stmt.name.value] = stmt + py_defs[stmt.name.value] = py_stmt + continue + if isinstance(stmt, cst.SimpleStatementLine): + for expr in stmt.body: + target_name: str | None = None + if isinstance(expr, cst.AnnAssign): + target = expr.target + if isinstance(target, cst.Name): + target_name = target.value + elif isinstance(expr, cst.Assign): + if len(expr.targets) == 1 and isinstance(expr.targets[0].target, cst.Name): + target_name = expr.targets[0].target.value + if target_name: + local_defs[target_name] = expr + if isinstance(py_stmt, (ast.AnnAssign, ast.Assign)): + if 
isinstance(py_stmt, ast.Assign): + if len(py_stmt.targets) != 1 or not isinstance(py_stmt.targets[0], ast.Name): + continue + py_defs[py_stmt.targets[0].id] = py_stmt + elif isinstance(py_stmt, ast.AnnAssign) and isinstance(py_stmt.target, ast.Name): + py_defs[py_stmt.target.id] = py_stmt + + module_data = ModuleData(module=cst_module, imports=imports, local_defs=local_defs, py_defs=py_defs) + _MODULE_CACHE[module_path] = module_data + return module_data + + +class ReferenceCollector(cst.CSTVisitor): + def __init__(self) -> None: + self.names: set[str] = set() + + def visit_Name(self, node: cst.Name) -> None: # noqa: D401 - libcst visitor API + self.names.add(node.value) + + +def collect_references(node: cst.CSTNode) -> set[str]: + collector = ReferenceCollector() + node.visit(collector) + return collector.names + + +def is_typed_dict(node: cst.ClassDef) -> bool: + for base in node.bases: + value = base.value + if isinstance(value, cst.Name) and value.value == "TypedDict": + return True + if isinstance(value, cst.Attribute) and value.attr.value == "TypedDict": + return True + return False + + +def determine_group(definition: Definition, target_name: str) -> str: + module_tail = definition.key.module.split(".")[-1] + lower_module = module_tail.lower() + lower_name = target_name.lower() + + if definition.key.module.startswith("openai.types.shared"): + return "shared" + if "error" in lower_module or "error" in lower_name: + return "errors" + if "usage" in lower_module or "usage" in lower_name: + return "usage" + if "tool_call" in lower_module: + return "tool_calls" + if "tool_choice" in lower_module: + return "tools" + if "web_search" in lower_module and "tool" not in lower_module: + return "tool_calls" + if "tool_param" in lower_module or lower_module.endswith("tool") or "tool_" in lower_module: + return "tools" + if "message" in lower_module or "annotation" in lower_module: + return "messages" + if "create_params" in lower_module: + return "requests" + if "prompt" in lower_module: + return "inputs" + if "input" in lower_module: + return "inputs" + if "format" in lower_module or "config" in lower_module or "chat_model" in lower_module or "responses_model" in lower_module: + return "configs" + if "reasoning" in lower_module: + return "outputs" + if "output" in lower_module or "content" in lower_module: + return "outputs" + if "status" in lower_module or "conversation" in lower_module or "stream" in lower_module or "object" in lower_module: + return "objects" + return "objects" + + +def collect_code_names(code: str) -> set[str]: + parsed = ast.parse(code) + names: set[str] = set() + + class Visitor(ast.NodeVisitor): + def visit_Name(self, node: ast.Name) -> None: # noqa: D401 + names.add(node.id) + + Visitor().visit(parsed) + names.discard("json_schema_type") + names.discard("register_schema") + return names + + +def format_relative_import(module: str, names: list[str]) -> str: + if len(names) == 1: + return f"from .{module} import {names[0]}" + body = [f"from .{module} import ("] + body.extend(f" {name}," for name in names) + body.append(")") + return "\n".join(body) + + +def is_type_alias_assign(node: ast.Assign) -> bool: + value = node.value + return not isinstance(value, (ast.Constant, ast.List, ast.Dict, ast.Set)) + + +def gather_definitions(roots: Iterable[DefinitionKey]) -> list[Definition]: + processed: dict[DefinitionKey, Definition] = OrderedDict() + visiting: set[DefinitionKey] = set() + + def visit(key: DefinitionKey) -> None: + if key in processed: + return + if key in visiting: + 
return + visiting.add(key) + + module_data = load_module_data(key.module) + node = module_data.local_defs.get(key.name) + if not node: + raise ValueError(f"Unable to locate definition for {key.name} in {key.module}") + + references = collect_references(node) + reference_names = {name for name in references if name not in SKIP_NAMES and name != key.name} + dependencies: list[DefinitionKey] = [] + for name in reference_names: + if name in module_data.local_defs: + dependencies.append(DefinitionKey(key.module, name)) + continue + target = module_data.imports.get(name) + if target: + module_name, original = target + dependencies.append(DefinitionKey(module_name, original)) + + for dep in dependencies: + visit(dep) + + register = False + decorate = False + py_node = module_data.py_defs.get(key.name) + if isinstance(node, cst.ClassDef): + decorate = not is_typed_dict(node) + register = decorate + elif isinstance(node, cst.AnnAssign): + register = True + elif isinstance(node, cst.Assign) and isinstance(py_node, ast.Assign): + register = is_type_alias_assign(py_node) + + processed[key] = Definition( + key=key, + node=node, + register=register, + decorate=decorate, + py_node=py_node, + references=reference_names, + ) + visiting.remove(key) + + for root in roots: + visit(root) + + return list(processed.values()) + + +def compute_target_name(name: str) -> str: + if name.startswith(TARGET_PREFIX): + return name + return f"{TARGET_PREFIX}{name}" + + +class NameRewriteTransformer(cst.CSTTransformer): + def __init__(self, name_map: dict[str, str]): + self.name_map = name_map + + def leave_Name(self, original_node: cst.Name, updated_node: cst.Name) -> cst.BaseExpression: + replacement = self.name_map.get(updated_node.value) + if replacement: + return updated_node.with_changes(value=replacement) + return updated_node + + def leave_Attribute(self, original_node: cst.Attribute, updated_node: cst.Attribute) -> cst.BaseExpression: + if isinstance(updated_node.value, cst.Name): + replacement = self.name_map.get(updated_node.value.value) + if replacement: + return cst.Name(replacement) + return updated_node + + +def _extract_string(stmt: cst.BaseStatement) -> str | None: + if not isinstance(stmt, cst.SimpleStatementLine): + return None + if len(stmt.body) != 1: + return None + expr = stmt.body[0] + if not isinstance(expr, cst.Expr): + return None + value = expr.value + if not isinstance(value, cst.SimpleString): + return None + return ast.literal_eval(value.value) + + +def _format_param_doc(name: str, description: str) -> str: + description = description.strip() + if not description: + return f" :param {name}:" + lines = description.splitlines() + first = lines[0] + rest = lines[1:] + formatted = f" :param {name}: {first}" + for line in rest: + if line: + formatted += "\n " + line + else: + formatted += "\n" + return formatted + + +def _rewrite_docstrings(definition: Definition, class_def: cst.ClassDef) -> cst.ClassDef: + statements = list(class_def.body.body) + new_body: list[cst.BaseStatement] = [] + param_docs: list[tuple[str, str]] = [] + + summary: str | None = None + if statements: + potential = _extract_string(statements[0]) + if potential is not None: + summary = potential.strip() + statements = statements[1:] + elif isinstance(definition.py_node, ast.ClassDef): + docstring = ast.get_docstring(definition.py_node) + if docstring: + summary = docstring.strip() + + i = 0 + while i < len(statements): + stmt = statements[i] + handled = False + if isinstance(stmt, cst.SimpleStatementLine) and stmt.body: + 
first = stmt.body[0] + target_name: str | None = None + if isinstance(first, cst.AnnAssign): + target = first.target + if isinstance(target, cst.Name): + target_name = target.value + elif isinstance(first, cst.Assign): + if len(first.targets) == 1 and isinstance(first.targets[0].target, cst.Name): + target_name = first.targets[0].target.value + if target_name: + doc: str | None = None + if i + 1 < len(statements): + potential_doc = _extract_string(statements[i + 1]) + if potential_doc is not None: + doc = potential_doc + i += 1 + if doc: + param_docs.append((target_name, doc)) + new_body.append(stmt) + handled = True + if not handled: + new_body.append(stmt) + i += 1 + + doc_lines: list[str] = [] + has_summary = bool(summary) + if has_summary: + doc_lines.append(summary) + if param_docs: + if has_summary: + doc_lines.append("") + doc_lines.extend(_format_param_doc(name, text) for name, text in param_docs) + + if doc_lines: + content = "\n".join(doc_lines) + if not has_summary: + content = "\n" + content + if "\n" in content: + content = content + "\n" + content = content.replace('"""', '\\"\\"\\"') + doc_node = cst.SimpleStatementLine( + body=[cst.Expr(value=cst.SimpleString(value=f'"""{content}"""'))] + ) + new_body.insert(0, doc_node) + + return class_def.with_changes(body=class_def.body.with_changes(body=new_body)) + + +def render_definitions(definitions: Iterable[Definition]) -> tuple[dict[str, list[RenderedDefinition]], dict[str, str]]: + defs = list(definitions) + name_map: dict[str, str] = { + "PropertyInfo": "Field", + "FieldInfo": "Field", + "SequenceNotStr": "Sequence", + } + for definition in defs: + name_map.setdefault(definition.key.name, compute_target_name(definition.key.name)) + + grouped: dict[str, list[RenderedDefinition]] = {} + name_to_group: dict[str, str] = {} + + for definition in defs: + node = definition.node + transformer = NameRewriteTransformer(name_map) + rewritten = node.visit(transformer) + + target_name = name_map[definition.key.name] + if isinstance(rewritten, cst.ClassDef): + rewritten = rewritten.with_changes(name=cst.Name(target_name)) + if definition.decorate: + decorator = cst.Decorator(cst.Name("json_schema_type")) + rewritten = rewritten.with_changes(decorators=[decorator, *rewritten.decorators]) + rewritten = _rewrite_docstrings(definition, rewritten) + elif isinstance(rewritten, cst.AnnAssign): + rewritten = rewritten.with_changes(target=cst.Name(target_name)) + elif isinstance(rewritten, cst.Assign): + targets = list(rewritten.targets) + first = targets[0] + if isinstance(first.target, cst.Name): + targets[0] = first.with_changes(target=cst.Name(target_name)) + rewritten = rewritten.with_changes(targets=targets) + + code = cst.Module([]).code_for_node(rewritten) + dependencies = { + name_map[name] + for name in definition.references + if name in name_map and name != definition.key.name + } + used_names = collect_code_names(code) + typing_names = {name for name in used_names if name in TYPING_NAMES} + typing_ext_names = {name for name in used_names if name in TYPING_EXT_NAMES} + pydantic_names = {name for name in used_names if name in {"BaseModel", "Field"}} + + group = determine_group(definition, target_name) + name_to_group[target_name] = group + grouped.setdefault(group, []).append( + RenderedDefinition( + name=target_name, + code=code, + register=definition.register, + decorate=definition.decorate, + dependencies=dependencies, + typing_names=typing_names, + typing_ext_names=typing_ext_names, + pydantic_names=pydantic_names, + ) + ) + + return 
grouped, name_to_group + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Generate llama_stack.apis.agents.openai_responses package from openai-python types.", + ) + parser.add_argument( + "--output", + type=pathlib.Path, + default=OUTPUT_DIR, + help=( + "Directory to write the generated schema package. Defaults to the checked-in " + "openai_responses package." + ), + ) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + roots = [DefinitionKey(module=m, name=n) for m, n in ROOTS] + definitions = gather_definitions(roots) + grouped, name_to_group = render_definitions(definitions) + + output_dir = args.output + if output_dir.exists(): + shutil.rmtree(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + module_order = [group for group in GROUP_ORDER if group in grouped] + remaining_groups = sorted(group for group in grouped if group not in GROUP_ORDER) + module_order.extend(remaining_groups) + + for group in module_order: + definitions_in_group = grouped[group] + typing_names = sorted(set().union(*(defn.typing_names for defn in definitions_in_group))) + typing_ext_names = sorted(set().union(*(defn.typing_ext_names for defn in definitions_in_group))) + pydantic_names = sorted(set().union(*(defn.pydantic_names for defn in definitions_in_group))) + needs_json_schema_type = any(defn.decorate for defn in definitions_in_group) + needs_register_schema = any(defn.register for defn in definitions_in_group) + + import_lines: list[str] = [] + if typing_names: + import_lines.append("from typing import " + ", ".join(typing_names)) + if pydantic_names: + import_lines.append("from pydantic import " + ", ".join(pydantic_names)) + if typing_ext_names: + import_lines.append("from typing_extensions import " + ", ".join(typing_ext_names)) + + schema_utils: list[str] = [] + if needs_json_schema_type: + schema_utils.append("json_schema_type") + if needs_register_schema: + schema_utils.append("register_schema") + if schema_utils: + import_lines.append("from llama_stack.schema_utils import " + ", ".join(schema_utils)) + + external_deps: dict[str, set[str]] = {} + for defn in definitions_in_group: + for dep in defn.dependencies: + dep_group = name_to_group.get(dep) + if not dep_group or dep_group == group: + continue + external_deps.setdefault(dep_group, set()).add(dep) + + for dep_group in sorted(external_deps): + names = sorted(external_deps[dep_group]) + import_lines.append(format_relative_import(dep_group, names)) + + sections: list[str] = [] + for defn in definitions_in_group: + block = defn.code + if defn.register: + block += f"\n\nregister_schema({defn.name}, name=\"{defn.name}\")" + sections.append(block) + + module_text = MODULE_HEADER + if import_lines: + module_text += "\n".join(import_lines) + "\n\n" + else: + module_text += "\n" + module_text += "\n\n\n".join(sections) + "\n" + (output_dir / f"{group}.py").write_text(module_text) + + init_lines = [ + '"""OpenAI Responses schema re-exports."""', + '', + '# This package mirrors the original openai_responses.py module but splits the', + '# definitions into focused submodules. 
The generator at', + '# scripts/generate_openai_responses.py targets this layout so the code can be', + '# updated automatically in manageable chunks.', + '', + ] + + all_names: list[str] = [] + for group in module_order: + definitions_in_group = grouped[group] + names = [defn.name for defn in definitions_in_group] + all_names.extend(names) + init_lines.append(format_relative_import(group, names)) + init_lines.append('') + + init_lines.append('__all__ = [') + for name in all_names: + init_lines.append(f" '{name}',") + init_lines.append(']') + init_lines.append('') + + (output_dir / '__init__.py').write_text('\n'.join(init_lines)) + + +if __name__ == "__main__": + main()
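As a quick smoke test of what this diff adds, the sketch below regenerates the package and round-trips two of the models defined above. It is illustrative only: the token counts and the ``--output`` path are made up, and ``model_validate`` is standard pydantic v2.

    # Regenerate the package in place, or into a scratch directory for review:
    #   python scripts/generate_openai_responses.py
    #   python scripts/generate_openai_responses.py --output /tmp/openai_responses_snapshot

    from llama_stack.apis.agents.openai_responses.tools import OpenAIResponseInputToolWebSearch
    from llama_stack.apis.agents.openai_responses.usage import OpenAIResponseUsage

    # Discriminated tool models fill in their "type" tag by default.
    tool = OpenAIResponseInputToolWebSearch(search_context_size="high")
    assert tool.type == "web_search"

    # Usage models validate plain dicts as decoded from an API response.
    usage = OpenAIResponseUsage.model_validate(
        {"input_tokens": 12, "output_tokens": 34, "total_tokens": 46}
    )
    assert usage.total_tokens == usage.input_tokens + usage.output_tokens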