Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 24 additions & 5 deletions python/packages/azure-ai/agent_framework_azure_ai/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -300,13 +300,26 @@ def _create_text_format_config(
raise ServiceInvalidRequestError("response_format must be a Pydantic model or mapping.")

async def _get_agent_reference_or_create(
self, run_options: dict[str, Any], messages_instructions: str | None
self,
run_options: dict[str, Any],
messages_instructions: str | None,
chat_options: ChatOptions | None = None,
) -> dict[str, str]:
"""Determine which agent to use and create if needed.

Args:
run_options: The prepared options for the API call.
messages_instructions: Instructions extracted from messages.
chat_options: The chat options containing response_format and other settings.

Returns:
dict[str, str]: The agent reference to use.
"""
# chat_options is needed separately because the base class excludes response_format
# from run_options (transforming it to text/text_format for OpenAI). Azure's agent
# creation API requires the original response_format to build its own config format.
if chat_options is None:
chat_options = ChatOptions()
# Agent name must be explicitly provided by the user.
if self.agent_name is None:
raise ServiceInitializationError(
Expand Down Expand Up @@ -341,8 +354,14 @@ async def _get_agent_reference_or_create(
if "top_p" in run_options:
args["top_p"] = run_options["top_p"]

if "response_format" in run_options:
response_format = run_options["response_format"]
# response_format is accessed from chat_options or additional_properties
# since the base class excludes it from run_options
response_format: Any = (
chat_options.response_format
if chat_options.response_format is not None
else chat_options.additional_properties.get("response_format")
)
if response_format:
args["text"] = PromptAgentDefinitionText(format=self._create_text_format_config(response_format))

# Combine instructions from messages and options
Expand Down Expand Up @@ -390,12 +409,12 @@ async def _prepare_options(

if not self._is_application_endpoint:
# Application-scoped response APIs do not support "agent" property.
agent_reference = await self._get_agent_reference_or_create(run_options, instructions)
agent_reference = await self._get_agent_reference_or_create(run_options, instructions, chat_options)
run_options["extra_body"] = {"agent": agent_reference}

# Remove properties that are not supported on request level
# but were configured on agent level
exclude = ["model", "tools", "response_format", "temperature", "top_p"]
exclude = ["model", "tools", "response_format", "temperature", "top_p", "text", "text_format"]

for property in exclude:
run_options.pop(property, None)
Expand Down
126 changes: 111 additions & 15 deletions python/packages/azure-ai/tests/test_azure_ai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -723,9 +723,10 @@ async def test_azure_ai_client_agent_creation_with_response_format(
mock_agent.version = "1.0"
mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent)

run_options = {"model": "test-model", "response_format": ResponseFormatModel}
run_options = {"model": "test-model"}
chat_options = ChatOptions(response_format=ResponseFormatModel)

await client._get_agent_reference_or_create(run_options, None) # type: ignore
await client._get_agent_reference_or_create(run_options, None, chat_options) # type: ignore

# Verify agent was created with response format configuration
call_args = mock_project_client.agents.create_version.call_args
Expand Down Expand Up @@ -776,19 +777,18 @@ async def test_azure_ai_client_agent_creation_with_mapping_response_format(
"additionalProperties": False,
}

run_options = {
"model": "test-model",
"response_format": {
"type": "json_schema",
"json_schema": {
"name": runtime_schema["title"],
"strict": True,
"schema": runtime_schema,
},
run_options = {"model": "test-model"}
response_format_mapping = {
"type": "json_schema",
"json_schema": {
"name": runtime_schema["title"],
"strict": True,
"schema": runtime_schema,
},
}
chat_options = ChatOptions(response_format=response_format_mapping) # type: ignore

await client._get_agent_reference_or_create(run_options, None) # type: ignore
await client._get_agent_reference_or_create(run_options, None, chat_options) # type: ignore

call_args = mock_project_client.agents.create_version.call_args
created_definition = call_args[1]["definition"]
Expand All @@ -805,7 +805,7 @@ async def test_azure_ai_client_agent_creation_with_mapping_response_format(
async def test_azure_ai_client_prepare_options_excludes_response_format(
mock_project_client: MagicMock,
) -> None:
"""Test that prepare_options excludes response_format from final run options."""
"""Test that prepare_options excludes response_format, text, and text_format from final run options."""
client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0")

messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])]
Expand All @@ -815,7 +815,12 @@ async def test_azure_ai_client_prepare_options_excludes_response_format(
patch.object(
client.__class__.__bases__[0],
"_prepare_options",
return_value={"model": "test-model", "response_format": ResponseFormatModel},
return_value={
"model": "test-model",
"response_format": ResponseFormatModel,
"text": {"format": {"type": "json_schema", "name": "test"}},
"text_format": ResponseFormatModel,
},
),
patch.object(
client,
Expand All @@ -825,8 +830,11 @@ async def test_azure_ai_client_prepare_options_excludes_response_format(
):
run_options = await client._prepare_options(messages, chat_options)

# response_format should be excluded from final run options
# response_format, text, and text_format should be excluded from final run options
# because they are configured at agent level, not request level
assert "response_format" not in run_options
assert "text" not in run_options
assert "text_format" not in run_options
# But extra_body should contain agent reference
assert "extra_body" in run_options
assert run_options["extra_body"]["agent"]["name"] == "test-agent"
Expand Down Expand Up @@ -1009,3 +1017,91 @@ async def test_azure_ai_chat_client_agent_with_tools() -> None:
assert response.text is not None
assert len(response.text) > 0
assert any(word in response.text.lower() for word in ["sunny", "25"])


class ReleaseBrief(BaseModel):
    """Structured output model for release brief."""

    # Short human-readable headline for the release.
    title: str = Field(description="A short title for the release.")
    # One-paragraph overview of what shipped.
    summary: str = Field(description="A brief summary of what was released.")
    # Bullet list of the most notable changes in the release.
    highlights: list[str] = Field(description="Key highlights from the release.")
    # Forbid unknown keys so the generated JSON schema disallows extra
    # properties — matching the strict/additionalProperties:false style used
    # by the runtime-schema tests in this file.
    model_config = ConfigDict(extra="forbid")


@pytest.mark.flaky
@skip_if_azure_ai_integration_tests_disabled
async def test_azure_ai_chat_client_agent_with_response_format() -> None:
    """Test ChatAgent with response_format (structured output) using AzureAIClient."""
    release_notes_prompt = (
        "Summarize the following release notes into a ReleaseBrief:\n\n"
        "Version 2.0 Release Notes:\n"
        "- Added new streaming API for real-time responses\n"
        "- Improved error handling with detailed messages\n"
        "- Performance boost of 50% in batch processing\n"
        "- Fixed memory leak in connection pooling"
    )

    async with temporary_chat_client(agent_name="ResponseFormatAgent") as chat_client:
        async with ChatAgent(chat_client=chat_client) as agent:
            result = await agent.run(release_notes_prompt, response_format=ReleaseBrief)

            # The run must hand back an AgentRunResponse carrying a parsed ReleaseBrief.
            assert isinstance(result, AgentRunResponse)
            assert result.value is not None
            assert isinstance(result.value, ReleaseBrief)

            # Every structured field should be populated with non-empty content.
            brief = result.value
            for populated_field in (brief.title, brief.summary, brief.highlights):
                assert len(populated_field) > 0


@pytest.mark.flaky
@skip_if_azure_ai_integration_tests_disabled
async def test_azure_ai_chat_client_agent_with_runtime_json_schema() -> None:
    """Test ChatAgent with runtime JSON schema (structured output) using AzureAIClient."""
    import json

    # Hand-built JSON schema supplied at runtime instead of a Pydantic model.
    runtime_schema = {
        "title": "WeatherDigest",
        "type": "object",
        "properties": {
            "location": {"type": "string"},
            "conditions": {"type": "string"},
            "temperature_c": {"type": "number"},
            "advisory": {"type": "string"},
        },
        "required": ["location", "conditions", "temperature_c", "advisory"],
        "additionalProperties": False,
    }
    # The raw mapping form of response_format travels via additional_chat_options.
    response_format_option = {
        "type": "json_schema",
        "json_schema": {
            "name": runtime_schema["title"],
            "strict": True,
            "schema": runtime_schema,
        },
    }

    async with temporary_chat_client(agent_name="RuntimeSchemaAgent") as chat_client:
        async with ChatAgent(chat_client=chat_client) as agent:
            response = await agent.run(
                "Give a brief weather digest for Seattle.",
                additional_chat_options={"response_format": response_format_option},
            )

            # A textual reply is required before JSON parsing can happen.
            assert isinstance(response, AgentRunResponse)
            assert response.text is not None

            # The reply must be valid JSON containing every schema-required key.
            parsed = json.loads(response.text)
            for required_key in ("location", "conditions", "temperature_c", "advisory"):
                assert required_key in parsed
15 changes: 11 additions & 4 deletions python/packages/core/agent_framework/openai/_responses_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -439,16 +439,23 @@ async def _prepare_options(
if (tool_choice := run_options.get("tool_choice")) and isinstance(tool_choice, dict) and "mode" in tool_choice:
run_options["tool_choice"] = tool_choice["mode"]

# additional properties
# additional properties (excluding response_format which is handled separately)
additional_options = {
key: value for key, value in chat_options.additional_properties.items() if value is not None
key: value
for key, value in chat_options.additional_properties.items()
if value is not None and key != "response_format"
}
if additional_options:
run_options.update(additional_options)

# response format and text config (after additional_properties so user can pass text via additional_properties)
response_format = chat_options.response_format
text_config = run_options.pop("text", None)
# Check both chat_options.response_format and additional_properties for response_format
response_format: Any = (
chat_options.response_format
if chat_options.response_format is not None
else chat_options.additional_properties.get("response_format")
)
text_config: Any = run_options.pop("text", None)
response_format, text_config = self._prepare_response_and_text_format(
response_format=response_format, text_config=text_config
)
Expand Down
88 changes: 88 additions & 0 deletions python/packages/core/tests/openai/test_openai_responses_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -2355,3 +2355,91 @@ async def test_openai_responses_client_agent_local_mcp_tool() -> None:
assert len(response.text) > 0
# Should contain Azure-related content since it's asking about Azure CLI
assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"])


class ReleaseBrief(BaseModel):
    """Structured output model for release brief testing."""

    # Short human-readable headline for the release.
    title: str
    # One-paragraph overview of what shipped.
    summary: str
    # Bullet list of the most notable changes in the release.
    highlights: list[str]
    # Forbid unknown keys so the generated JSON schema disallows extra
    # properties, matching the strict structured-output style used by the
    # runtime-schema tests in this file.
    model_config = {"extra": "forbid"}


@pytest.mark.flaky
@skip_if_openai_integration_tests_disabled
async def test_openai_responses_client_agent_with_response_format_pydantic() -> None:
    """Integration test for response_format with Pydantic model using OpenAI Responses Client."""
    release_notes_prompt = (
        "Summarize the following release notes into a ReleaseBrief:\n\n"
        "Version 2.0 Release Notes:\n"
        "- Added new streaming API for real-time responses\n"
        "- Improved error handling with detailed messages\n"
        "- Performance boost of 50% in batch processing\n"
        "- Fixed memory leak in connection pooling"
    )

    async with ChatAgent(
        chat_client=OpenAIResponsesClient(),
        instructions="You are a helpful assistant that returns structured JSON responses.",
    ) as agent:
        result = await agent.run(release_notes_prompt, response_format=ReleaseBrief)

        # The run must hand back an AgentRunResponse carrying a parsed ReleaseBrief.
        assert isinstance(result, AgentRunResponse)
        assert result.value is not None
        assert isinstance(result.value, ReleaseBrief)

        # Every structured field should be populated with non-empty content.
        brief = result.value
        for populated_field in (brief.title, brief.summary, brief.highlights):
            assert len(populated_field) > 0


@pytest.mark.flaky
@skip_if_openai_integration_tests_disabled
async def test_openai_responses_client_agent_with_runtime_json_schema() -> None:
    """Integration test for response_format with runtime JSON schema using OpenAI Responses Client."""
    import json

    # Hand-built JSON schema supplied at runtime instead of a Pydantic model.
    runtime_schema = {
        "title": "WeatherDigest",
        "type": "object",
        "properties": {
            "location": {"type": "string"},
            "conditions": {"type": "string"},
            "temperature_c": {"type": "number"},
            "advisory": {"type": "string"},
        },
        "required": ["location", "conditions", "temperature_c", "advisory"],
        "additionalProperties": False,
    }
    # The raw mapping form of response_format travels via additional_chat_options.
    response_format_option = {
        "type": "json_schema",
        "json_schema": {
            "name": runtime_schema["title"],
            "strict": True,
            "schema": runtime_schema,
        },
    }

    async with ChatAgent(
        chat_client=OpenAIResponsesClient(),
        instructions="Return only JSON that matches the provided schema. Do not add commentary.",
    ) as agent:
        response = await agent.run(
            "Give a brief weather digest for Seattle.",
            additional_chat_options={"response_format": response_format_option},
        )

        # A textual reply is required before JSON parsing can happen.
        assert isinstance(response, AgentRunResponse)
        assert response.text is not None

        # The reply must be valid JSON containing every schema-required key.
        parsed = json.loads(response.text)
        for required_key in ("location", "conditions", "temperature_c", "advisory"):
            assert required_key in parsed
Loading