Skip to content

Commit f43f541

Browse files
committed
handle just in case
1 parent 3badbeb commit f43f541

File tree

2 files changed: +37 additions, -6 deletions

libs/partners/groq/langchain_groq/chat_models.py

Lines changed: 17 additions & 6 deletions
@@ -1463,15 +1463,26 @@ def _create_usage_metadata(groq_token_usage: dict) -> UsageMetadata:
         or 0
     )
     total_tokens = groq_token_usage.get("total_tokens") or input_tokens + output_tokens
+
+    # Support both formats for token details:
+    # Responses API uses "*_tokens_details", Chat Completions API might use
+    # "prompt_token_details"
+    input_details_dict = (
+        groq_token_usage.get("input_tokens_details")
+        or groq_token_usage.get("prompt_tokens_details")
+        or {}
+    )
+    output_details_dict = (
+        groq_token_usage.get("output_tokens_details")
+        or groq_token_usage.get("completion_tokens_details")
+        or {}
+    )
+
     input_token_details: dict = {
-        "cache_read": (groq_token_usage.get("input_tokens_details") or {}).get(
-            "cached_tokens"
-        ),
+        "cache_read": input_details_dict.get("cached_tokens"),
     }
     output_token_details: dict = {
-        "reasoning": (groq_token_usage.get("output_tokens_details") or {}).get(
-            "reasoning_tokens"
-        ),
+        "reasoning": output_details_dict.get("reasoning_tokens"),
     }
     usage_metadata: UsageMetadata = {
         "input_tokens": input_tokens,

libs/partners/groq/tests/unit_tests/test_chat_models.py

Lines changed: 20 additions & 0 deletions
@@ -330,6 +330,26 @@ def test_create_usage_metadata_responses_api_format() -> None:
     assert "output_token_details" not in result


+def test_create_usage_metadata_chat_completions_with_details() -> None:
+    """Test usage metadata with hypothetical Chat Completions API format."""
+    token_usage = {
+        "prompt_tokens": 100,
+        "completion_tokens": 50,
+        "total_tokens": 150,
+        "prompt_tokens_details": {"cached_tokens": 80},
+        "completion_tokens_details": {"reasoning_tokens": 25},
+    }
+
+    result = _create_usage_metadata(token_usage)
+
+    assert isinstance(result, dict)
+    assert result["input_tokens"] == 100
+    assert result["output_tokens"] == 50
+    assert result["total_tokens"] == 150
+    assert result.get("input_token_details", {}).get("cache_read") == 80
+    assert result.get("output_token_details", {}).get("reasoning") == 25
+
+
 def test_create_usage_metadata_with_cached_tokens() -> None:
     """Test usage metadata with prompt caching."""
     token_usage = {

0 commit comments

Comments
 (0)