diff --git a/src/kimi_cli/config.py b/src/kimi_cli/config.py
index b135958ca..73845b86d 100644
--- a/src/kimi_cli/config.py
+++ b/src/kimi_cli/config.py
@@ -12,6 +12,7 @@
     SecretStr,
     ValidationError,
     field_serializer,
+    field_validator,
     model_validator,
 )
 from tomlkit.exceptions import TOMLKitError
@@ -44,9 +45,19 @@ class LLMProvider(BaseModel):
     """Environment variables to set before creating the provider instance"""
     custom_headers: dict[str, str] | None = None
     """Custom headers to include in API requests"""
+    reasoning_key: str | None = None
+    """Key name for reasoning/thinking content used by the openai_legacy provider (e.g. "reasoning_content")."""
     oauth: OAuthRef | None = None
     """OAuth credential reference (do not store tokens here)."""
 
+    @field_validator("reasoning_key", mode="before")
+    @classmethod
+    def normalize_reasoning_key(cls, v: str | None) -> str | None:
+        if isinstance(v, str):
+            v = v.strip()
+            return v or None
+        return v
+
     @field_serializer("api_key", when_used="json")
     def dump_secret(self, v: SecretStr):
         return v.get_secret_value()
diff --git a/src/kimi_cli/llm.py b/src/kimi_cli/llm.py
index a72644c05..3dcb1680f 100644
--- a/src/kimi_cli/llm.py
+++ b/src/kimi_cli/llm.py
@@ -152,6 +152,7 @@ def create_llm(
                 model=model.model,
                 base_url=provider.base_url,
                 api_key=resolved_api_key,
+                reasoning_key=provider.reasoning_key,
             )
         case "openai_responses":
             from kosong.contrib.chat_provider.openai_responses import OpenAIResponses
diff --git a/tests/core/test_create_llm.py b/tests/core/test_create_llm.py
index 27bb54d4f..02570ab23 100644
--- a/tests/core/test_create_llm.py
+++ b/tests/core/test_create_llm.py
@@ -1,8 +1,10 @@
 from __future__ import annotations
 
+import pytest
 from inline_snapshot import snapshot
 from kosong.chat_provider.echo import EchoChatProvider
 from kosong.chat_provider.kimi import Kimi
+from kosong.contrib.chat_provider.openai_legacy import OpenAILegacy
 from pydantic import SecretStr
 
 from kimi_cli.config import LLMModel, LLMProvider
@@ -93,3 +95,43 @@ def test_create_llm_requires_base_url_for_kimi():
     model = LLMModel(provider="kimi", model="kimi-base", max_context_size=4096)
 
     assert create_llm(provider, model) is None
+
+
+def test_create_llm_openai_legacy_passes_reasoning_key():
+    provider = LLMProvider(
+        type="openai_legacy",
+        base_url="http://localhost:8000/v1",
+        api_key=SecretStr("dummy"),
+        reasoning_key="reasoning_content",
+    )
+    model = LLMModel(provider="vllm", model="some-model", max_context_size=8192)
+
+    llm = create_llm(provider, model)
+    assert llm is not None
+    assert isinstance(llm.chat_provider, OpenAILegacy)
+    assert llm.chat_provider._reasoning_key == "reasoning_content"
+
+
+def test_create_llm_openai_legacy_reasoning_key_default_none():
+    provider = LLMProvider(
+        type="openai_legacy",
+        base_url="http://localhost:8000/v1",
+        api_key=SecretStr("dummy"),
+    )
+    model = LLMModel(provider="vllm", model="some-model", max_context_size=8192)
+
+    llm = create_llm(provider, model)
+    assert llm is not None
+    assert isinstance(llm.chat_provider, OpenAILegacy)
+    assert llm.chat_provider._reasoning_key is None
+
+
+@pytest.mark.parametrize("value", ["", " ", "\t\n"])
+def test_reasoning_key_blank_normalized_to_none(value):
+    provider = LLMProvider(
+        type="openai_legacy",
+        base_url="http://localhost:8000/v1",
+        api_key=SecretStr("dummy"),
+        reasoning_key=value,
+    )
+    assert provider.reasoning_key is None