@@ -13,7 +13,12 @@
 from eval_protocol.dataset_logger.dataset_logger import DatasetLogger
 from eval_protocol.mcp.execution.policy import LiteLLMPolicy
 from eval_protocol.mcp.mcp_multi_client import MCPMultiClient
-from eval_protocol.models import EvaluationRow, Message, ChatCompletionContentPartTextParam
+from eval_protocol.models import (
+    EvaluationRow,
+    Message,
+    ChatCompletionContentPartParam,
+    ChatCompletionContentPartTextParam,
+)
 from openai.types import CompletionUsage
 from eval_protocol.pytest.rollout_processor import RolloutProcessor
 from eval_protocol.pytest.types import Dataset, RolloutProcessorConfig
@@ -98,7 +103,7 @@ def append_message_and_log(self, message: Message):
         self.messages.append(message)
         self.logger.log(self.evaluation_row)

-    async def call_agent(self) -> Optional[Union[str, List[ChatCompletionContentPartTextParam]]]:
+    async def call_agent(self) -> Optional[Union[str, List[ChatCompletionContentPartParam]]]:
        """
        Call the assistant with the user query.
        """
@@ -222,7 +227,7 @@ def _get_content_from_tool_result(self, tool_result: CallToolResult | str) -> Li

    def _format_tool_message_content(
        self, content: List[TextContent]
-    ) -> Union[str, List[ChatCompletionContentPartTextParam]]:
+    ) -> Union[str, List[ChatCompletionContentPartParam]]:
        """Format tool result content for inclusion in a tool message.

        - If a single text item, return plain string per OpenAI semantics.
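The widened return type (`ChatCompletionContentPartParam` instead of the text-only `ChatCompletionContentPartTextParam`) lets the tool-message formatter return non-text content parts as well. Below is a minimal sketch of the formatting rule the docstring describes, using a hypothetical standalone helper and a stand-in `TextContent` class rather than the actual method and MCP types:

```python
from typing import List, Union

class TextContent:
    """Stand-in for the MCP text content type; only the fields used here."""
    def __init__(self, text: str):
        self.type = "text"
        self.text = text

def format_tool_message_content(content: List[TextContent]) -> Union[str, List[dict]]:
    # Single text item: collapse to a plain string, per OpenAI tool-message semantics.
    if len(content) == 1:
        return content[0].text
    # Multiple items: return a list of text content parts.
    return [{"type": "text", "text": item.text} for item in content]

# Usage: one item yields a string, several items yield a list of parts.
assert format_tool_message_content([TextContent("ok")]) == "ok"
assert format_tool_message_content([TextContent("a"), TextContent("b")]) == [
    {"type": "text", "text": "a"},
    {"type": "text", "text": "b"},
]
```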