tests/entrypoints/openai/test_chat.py: 139 changes (0 additions, 139 deletions)
@@ -599,145 +599,6 @@ async def test_structured_outputs_choice_chat_logprobs(
assert item.logprob >= -9999.0, f"Failed (top_logprobs={top_logprobs})"


@pytest.mark.asyncio
async def test_named_tool_use(
client: openai.AsyncOpenAI,
sample_json_schema,
):
messages = [
{"role": "system", "content": "you are a helpful assistant"},
{
"role": "user",
"content": (
"Give an example JSON for an employee profile using the specified tool."
),
},
]
tools = [
{
"type": "function",
"function": {
"name": "dummy_function_name",
"description": "This is a dummy function",
"parameters": sample_json_schema,
},
}
]
tool_choice = {"type": "function", "function": {"name": "dummy_function_name"}}

# non-streaming

chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=tools,
tool_choice=tool_choice,
)
message = chat_completion.choices[0].message
assert len(message.content) == 0
json_string = message.tool_calls[0].function.arguments
json1 = json.loads(json_string)
jsonschema.validate(instance=json1, schema=sample_json_schema)

messages.append({"role": "assistant", "content": json_string})
messages.append(
{"role": "user", "content": "Give me another one with a different name and age"}
)

# streaming

stream = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=tools,
tool_choice=tool_choice,
stream=True,
)

output = []
finish_reason_count = 0
async for chunk in stream:
delta = chunk.choices[0].delta
if delta.role:
assert delta.role == "assistant"
assert delta.content is None or len(delta.content) == 0
if delta.tool_calls:
output.append(delta.tool_calls[0].function.arguments)
if chunk.choices[0].finish_reason is not None:
finish_reason_count += 1
# finish reason should only return in last block
assert finish_reason_count == 1
json2 = json.loads("".join(output))
jsonschema.validate(instance=json2, schema=sample_json_schema)
assert json1["name"] != json2["name"]
assert json1["age"] != json2["age"]


@pytest.mark.asyncio
async def test_inconsistent_tool_choice_and_tools(
client: openai.AsyncOpenAI, sample_json_schema
):
messages = [
{"role": "system", "content": "you are a helpful assistant"},
{
"role": "user",
"content": f"Give an example JSON for an employee profile that "
f"fits this schema: {sample_json_schema}",
},
]

with pytest.raises(openai.BadRequestError):
await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tool_choice={
"type": "function",
"function": {"name": "dummy_function_name"},
},
)

with pytest.raises(openai.BadRequestError):
await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=[
{
"type": "function",
"function": {
"name": "dummy_function_name",
"description": "This is a dummy function",
"parameters": sample_json_schema,
},
}
],
tool_choice={
"type": "function",
"function": {"name": "nondefined_function_name"},
},
)
with pytest.raises(openai.BadRequestError):
await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=[
{
"type": "function",
"function": {
"name": "dummy_function_name",
"description": "This is a dummy function",
"parameters": sample_json_schema,
},
}
],
tool_choice={},
)


@pytest.mark.asyncio
async def test_response_format_json_object(client: openai.AsyncOpenAI):
for _ in range(2):
tests/entrypoints/openai/test_completion_with_function_calling.py: 139 changes (139 additions, 0 deletions)
@@ -341,3 +341,142 @@
else:
# No tool called — just print model's direct reply
assert message.content is not None


@pytest.mark.asyncio
async def test_named_tool_use(
client: openai.AsyncOpenAI,
sample_json_schema,
):
messages = [
{"role": "system", "content": "you are a helpful assistant"},
{
"role": "user",
"content": (
"Give an example JSON for an employee profile using the specified tool."
),
},
]
tools = [
{
"type": "function",
"function": {
"name": "dummy_function_name",
"description": "This is a dummy function",
"parameters": sample_json_schema,
},
}
]
tool_choice = {"type": "function", "function": {"name": "dummy_function_name"}}

# non-streaming

chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=tools,
tool_choice=tool_choice,
)
message = chat_completion.choices[0].message
assert len(message.content) == 0
json_string = message.tool_calls[0].function.arguments
json1 = json.loads(json_string)

Check failure on line 384 in tests/entrypoints/openai/test_completion_with_function_calling.py (GitHub Actions / pre-commit): Name "json" is not defined [name-defined]; Ruff: 384:13: F821 Undefined name `json`
jsonschema.validate(instance=json1, schema=sample_json_schema)

Check failure on line 385 in tests/entrypoints/openai/test_completion_with_function_calling.py (GitHub Actions / pre-commit): Name "jsonschema" is not defined [name-defined]; Ruff: 385:5: F821 Undefined name `jsonschema`

messages.append({"role": "assistant", "content": json_string})
messages.append(
{"role": "user", "content": "Give me another one with a different name and age"}
)

# streaming

stream = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=tools,
tool_choice=tool_choice,
stream=True,
)

output = []
finish_reason_count = 0
async for chunk in stream:
delta = chunk.choices[0].delta
if delta.role:
assert delta.role == "assistant"
assert delta.content is None or len(delta.content) == 0
if delta.tool_calls:
output.append(delta.tool_calls[0].function.arguments)
if chunk.choices[0].finish_reason is not None:
finish_reason_count += 1
# finish reason should only return in last block
assert finish_reason_count == 1
json2 = json.loads("".join(output))

Check failure on line 416 in tests/entrypoints/openai/test_completion_with_function_calling.py (GitHub Actions / pre-commit): Name "json" is not defined [name-defined]; Ruff: 416:13: F821 Undefined name `json`
jsonschema.validate(instance=json2, schema=sample_json_schema)

Check failure on line 417 in tests/entrypoints/openai/test_completion_with_function_calling.py (GitHub Actions / pre-commit): Name "jsonschema" is not defined [name-defined]; Ruff: 417:5: F821 Undefined name `jsonschema`
assert json1["name"] != json2["name"]
assert json1["age"] != json2["age"]


@pytest.mark.asyncio
async def test_inconsistent_tool_choice_and_tools(
client: openai.AsyncOpenAI, sample_json_schema
):
messages = [
{"role": "system", "content": "you are a helpful assistant"},
{
"role": "user",
"content": f"Give an example JSON for an employee profile that "
f"fits this schema: {sample_json_schema}",
},
]

with pytest.raises(openai.BadRequestError):
await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tool_choice={
"type": "function",
"function": {"name": "dummy_function_name"},
},
)

with pytest.raises(openai.BadRequestError):
await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=[
{
"type": "function",
"function": {
"name": "dummy_function_name",
"description": "This is a dummy function",
"parameters": sample_json_schema,
},
}
],
tool_choice={
"type": "function",
"function": {"name": "nondefined_function_name"},
},
)
with pytest.raises(openai.BadRequestError):
await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=[
{
"type": "function",
"function": {
"name": "dummy_function_name",
"description": "This is a dummy function",
"parameters": sample_json_schema,
},
}
],
tool_choice={},
)
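
The pre-commit failures annotated above indicate that the relocated test still references `json` and `jsonschema`, which the destination file apparently does not import. A minimal sketch of the likely fix, assuming tests/entrypoints/openai/test_completion_with_function_calling.py does not already import these modules:

    # Hypothetical addition to the top-of-file import block of
    # tests/entrypoints/openai/test_completion_with_function_calling.py.
    # The relocated test calls json.loads() and jsonschema.validate(),
    # so both modules are needed; exact placement and ordering should
    # follow the file's existing import style.
    import json

    import jsonschema

With those two imports in place, the F821 and [name-defined] errors flagged at lines 384, 385, 416, and 417 should clear on the next pre-commit run.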