Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions python/packages/azure-ai/agent_framework_azure_ai/_shared.py
Original file line number Diff line number Diff line change
Expand Up @@ -571,4 +571,25 @@ def _convert_response_format(response_format: Mapping[str, Any]) -> dict[str, An
if format_type in {"json_object", "text"}:
return {"type": format_type}

# Handle raw JSON schemas (e.g. {"type": "object", "properties": {...}})
# by wrapping them in the expected json_schema envelope.
# Detect by checking for JSON Schema primitive types or known schema keywords.
json_schema_keywords = {"properties", "anyOf", "oneOf", "allOf", "$ref", "$defs"}
json_schema_primitive_types = {"object", "array", "string", "number", "integer", "boolean", "null"}
if format_type in json_schema_primitive_types or (
format_type is None and any(k in response_format for k in json_schema_keywords)
):
schema = dict(response_format)
if schema.get("type") == "object" and "additionalProperties" not in schema:
schema["additionalProperties"] = False
# Pop title from schema since OpenAI strict mode rejects unknown keys;
# use it as the schema name in the envelope instead.
name = str(schema.pop("title", None) or "response")
return {
"type": "json_schema",
"name": name,
"schema": schema,
"strict": True,
}

raise IntegrationInvalidRequestException("Unsupported response_format provided for Azure AI client.")
26 changes: 26 additions & 0 deletions python/packages/azure-ai/tests/test_shared.py
Original file line number Diff line number Diff line change
Expand Up @@ -404,6 +404,32 @@ def test_convert_response_format_json_schema_missing_schema_raises() -> None:
_convert_response_format({"type": "json_schema", "json_schema": {}})


def test_convert_response_format_raw_json_schema_with_properties() -> None:
    """A raw JSON schema with a title is wrapped into a strict json_schema envelope."""
    raw_schema = {
        "type": "object",
        "properties": {"x": {"type": "string"}},
        "title": "MyOutput",
    }
    converted = _convert_response_format(raw_schema)

    # The title is promoted to the envelope name and stripped from the schema;
    # strict mode forces additionalProperties to False on object schemas.
    assert converted["type"] == "json_schema"
    assert converted["name"] == "MyOutput"
    assert converted["strict"] is True
    assert converted["schema"]["additionalProperties"] is False
    assert "title" not in converted["schema"]


def test_convert_response_format_raw_json_schema_no_title() -> None:
    """A raw JSON schema with no title gets the fallback envelope name 'response'."""
    schema_without_title = {"type": "object", "properties": {"x": {"type": "string"}}}
    converted = _convert_response_format(schema_without_title)

    # No title to promote, so the generic default name is used.
    assert converted["name"] == "response"


def test_convert_response_format_raw_json_schema_with_anyof() -> None:
    """A schema identified only by the anyOf keyword (no 'type') is still wrapped."""
    union_schema = {"anyOf": [{"type": "string"}, {"type": "number"}]}
    converted = _convert_response_format(union_schema)

    # Schema keywords alone are enough to trigger the json_schema envelope.
    assert converted["type"] == "json_schema"
    assert converted["strict"] is True


def test_from_azure_ai_tools_mcp_approval_mode_always() -> None:
"""Test from_azure_ai_tools converts MCP require_approval='always' to dict."""
tools = [
Expand Down
21 changes: 21 additions & 0 deletions python/packages/openai/agent_framework_openai/_chat_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -627,6 +627,27 @@ def _convert_response_format(self, response_format: Mapping[str, Any]) -> dict[s
if format_type in {"json_object", "text"}:
return {"type": format_type}

# Handle raw JSON schemas (e.g. {"type": "object", "properties": {...}})
# by wrapping them in the expected json_schema envelope.
# Detect by checking for JSON Schema primitive types or known schema keywords.
json_schema_keywords = {"properties", "anyOf", "oneOf", "allOf", "$ref", "$defs"}
json_schema_primitive_types = {"object", "array", "string", "number", "integer", "boolean", "null"}
if format_type in json_schema_primitive_types or (
format_type is None and any(k in response_format for k in json_schema_keywords)
):
schema = dict(response_format)
if schema.get("type") == "object" and "additionalProperties" not in schema:
schema["additionalProperties"] = False
# Pop title from schema since OpenAI strict mode rejects unknown keys;
# use it as the schema name in the envelope instead.
name = str(schema.pop("title", None) or "response")
return {
"type": "json_schema",
"name": name,
"schema": schema,
"strict": True,
}

raise ChatClientInvalidRequestException("Unsupported response_format provided for Responses client.")

def _get_conversation_id(
Expand Down
65 changes: 65 additions & 0 deletions python/packages/openai/tests/openai/test_openai_chat_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -1552,6 +1552,71 @@ def test_response_format_json_schema_missing_schema() -> None:
client._prepare_response_and_text_format(response_format=response_format, text_config=None)


def test_response_format_raw_json_schema_with_properties() -> None:
    """A raw JSON schema with a title is wrapped into a strict json_schema envelope."""
    client = OpenAIChatClient(model="test-model", api_key="test-key")

    raw_schema = {
        "type": "object",
        "properties": {"x": {"type": "string"}},
        "title": "MyOutput",
    }

    _, text_config = client._prepare_response_and_text_format(response_format=raw_schema, text_config=None)

    assert text_config is not None
    envelope = text_config["format"]
    # Title becomes the envelope name and is removed from the schema itself;
    # strict mode injects additionalProperties=False on object schemas.
    assert envelope["type"] == "json_schema"
    assert envelope["name"] == "MyOutput"
    assert envelope["strict"] is True
    assert envelope["schema"]["additionalProperties"] is False
    assert "title" not in envelope["schema"]


def test_response_format_raw_json_schema_no_title() -> None:
    """A raw JSON schema with no title gets the fallback envelope name 'response'."""
    client = OpenAIChatClient(model="test-model", api_key="test-key")

    schema_without_title = {"type": "object", "properties": {"x": {"type": "string"}}}

    _, text_config = client._prepare_response_and_text_format(
        response_format=schema_without_title, text_config=None
    )

    assert text_config is not None
    # Nothing to promote into the name slot, so the generic default is used.
    assert text_config["format"]["name"] == "response"


def test_response_format_raw_json_schema_preserves_additional_properties() -> None:
    """An explicit additionalProperties value in the raw schema is left untouched."""
    client = OpenAIChatClient(model="test-model", api_key="test-key")

    schema_with_flag = {
        "type": "object",
        "properties": {"x": {"type": "string"}},
        "additionalProperties": True,
    }

    _, text_config = client._prepare_response_and_text_format(response_format=schema_with_flag, text_config=None)

    assert text_config is not None
    # The conversion only injects additionalProperties when it is absent.
    assert text_config["format"]["schema"]["additionalProperties"] is True


def test_response_format_raw_json_schema_non_object_type() -> None:
    """Non-object raw schemas (e.g. arrays) do not get additionalProperties injected."""
    client = OpenAIChatClient(model="test-model", api_key="test-key")

    array_schema = {"type": "array", "items": {"type": "string"}}

    _, text_config = client._prepare_response_and_text_format(response_format=array_schema, text_config=None)

    assert text_config is not None
    # additionalProperties only applies to object schemas, so it must stay absent.
    assert "additionalProperties" not in text_config["format"]["schema"]


def test_response_format_raw_json_schema_with_anyof() -> None:
    """A schema identified only by the anyOf keyword (no 'type') is still wrapped."""
    client = OpenAIChatClient(model="test-model", api_key="test-key")

    union_schema = {"anyOf": [{"type": "string"}, {"type": "number"}]}

    _, text_config = client._prepare_response_and_text_format(response_format=union_schema, text_config=None)

    assert text_config is not None
    # Schema keywords alone are enough to trigger the json_schema envelope.
    assert text_config["format"]["type"] == "json_schema"


def test_response_format_unsupported_type() -> None:
"""Test unsupported response_format type raises error."""
client = OpenAIChatClient(model="test-model", api_key="test-key")
Expand Down
14 changes: 8 additions & 6 deletions python/samples/02-agents/declarative/inline_yaml.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os

from agent_framework.declarative import AgentFactory
from azure.identity.aio import AzureCliCredential
Expand Down Expand Up @@ -31,16 +32,17 @@ async def main():

model:
id: =Env.AZURE_OPENAI_MODEL
connection:
kind: remote
endpoint: =Env.FOUNDRY_PROJECT_ENDPOINT
"""
# create the agent from the yaml
async with (
AzureCliCredential() as credential,
AgentFactory(client_kwargs={"credential": credential}, safe_mode=False).create_agent_from_yaml(
yaml_definition
) as agent,
AgentFactory(
client_kwargs={
"credential": credential,
"project_endpoint": os.environ["FOUNDRY_PROJECT_ENDPOINT"],
},
safe_mode=False,
).create_agent_from_yaml(yaml_definition) as agent,
):
response = await agent.run("What can you do for me?")
print("Agent response:", response.text)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,28 @@

from agent_framework import tool
from agent_framework.github import GitHubCopilotAgent
from copilot.generated.session_events import PermissionRequest
from copilot.types import PermissionRequestResult
from dotenv import load_dotenv
from pydantic import Field

# Load environment variables from .env file
load_dotenv()


def prompt_permission(request: PermissionRequest, context: dict[str, str]) -> PermissionRequestResult:
    """Ask the user on stdin whether to approve a Copilot permission request."""
    print(f"\n[Permission Request: {request.kind}]")

    command = request.full_command_text
    if command is not None:
        print(f" Command: {command}")

    # Anything other than an explicit yes is treated as a denial.
    answer = input("Approve? (y/n): ").strip().lower()
    verdict = "approved" if answer in ("y", "yes") else "denied-interactively-by-user"
    return PermissionRequestResult(kind=verdict)


# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production;
# see samples/02-agents/tools/function_tool_with_approval.py
# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py.
Expand All @@ -45,6 +60,7 @@ async def non_streaming_example() -> None:
agent = GitHubCopilotAgent(
instructions="You are a helpful weather agent.",
tools=[get_weather],
default_options={"on_permission_request": prompt_permission},
)

async with agent:
Expand All @@ -61,6 +77,7 @@ async def streaming_example() -> None:
agent = GitHubCopilotAgent(
instructions="You are a helpful weather agent.",
tools=[get_weather],
default_options={"on_permission_request": prompt_permission},
)

async with agent:
Expand All @@ -80,6 +97,7 @@ async def runtime_options_example() -> None:
agent = GitHubCopilotAgent(
instructions="Always respond in exactly 3 words.",
tools=[get_weather],
default_options={"on_permission_request": prompt_permission},
)

async with agent:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,9 +69,10 @@ async def main() -> None:
print(f"Agent: {result1}\n")

# Query that exercises the remote Microsoft Learn MCP server
# Remote MCP calls may take longer, so increase the timeout
query2 = "Search Microsoft Learn for 'Azure Functions Python' and summarize the top result"
print(f"User: {query2}")
result2 = await agent.run(query2)
result2 = await agent.run(query2, options={"timeout": 120})
print(f"Agent: {result2}\n")


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,24 @@

from agent_framework import tool
from agent_framework.github import GitHubCopilotAgent
from copilot.generated.session_events import PermissionRequest
from copilot.types import PermissionRequestResult
from pydantic import Field


def prompt_permission(request: PermissionRequest, context: dict[str, str]) -> PermissionRequestResult:
    """Ask the user on stdin whether to approve a Copilot permission request."""
    print(f"\n[Permission Request: {request.kind}]")

    command = request.full_command_text
    if command is not None:
        print(f" Command: {command}")

    # Anything other than an explicit yes is treated as a denial.
    answer = input("Approve? (y/n): ").strip().lower()
    verdict = "approved" if answer in ("y", "yes") else "denied-interactively-by-user"
    return PermissionRequestResult(kind=verdict)


# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production;
# see samples/02-agents/tools/function_tool_with_approval.py
# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py.
Expand All @@ -36,6 +51,7 @@ async def example_with_automatic_session_creation() -> None:
agent = GitHubCopilotAgent(
instructions="You are a helpful weather agent.",
tools=[get_weather],
default_options={"on_permission_request": prompt_permission},
)

async with agent:
Expand All @@ -50,7 +66,7 @@ async def example_with_automatic_session_creation() -> None:
print(f"\nUser: {query2}")
result2 = await agent.run(query2)
print(f"Agent: {result2}")
print("Note: Each call creates a separate session, so the agent doesn't remember previous context.\n")
print("Note: Each call creates a separate session, so the agent may not remember previous context.\n")


async def example_with_session_persistence() -> None:
Expand All @@ -60,6 +76,7 @@ async def example_with_session_persistence() -> None:
agent = GitHubCopilotAgent(
instructions="You are a helpful weather agent.",
tools=[get_weather],
default_options={"on_permission_request": prompt_permission},
)

async with agent:
Expand Down Expand Up @@ -96,6 +113,7 @@ async def example_with_existing_session_id() -> None:
agent1 = GitHubCopilotAgent(
instructions="You are a helpful weather agent.",
tools=[get_weather],
default_options={"on_permission_request": prompt_permission},
)

async with agent1:
Expand All @@ -117,6 +135,7 @@ async def example_with_existing_session_id() -> None:
agent2 = GitHubCopilotAgent(
instructions="You are a helpful weather agent.",
tools=[get_weather],
default_options={"on_permission_request": prompt_permission},
)

async with agent2:
Expand Down
Loading