Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions src/kimi_cli/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
SecretStr,
ValidationError,
field_serializer,
field_validator,
model_validator,
)
from tomlkit.exceptions import TOMLKitError
Expand Down Expand Up @@ -44,9 +45,19 @@ class LLMProvider(BaseModel):
"""Environment variables to set before creating the provider instance"""
custom_headers: dict[str, str] | None = None
"""Custom headers to include in API requests"""
reasoning_key: str | None = None
"""Key name for reasoning/thinking content used by the openai_legacy provider (e.g. "reasoning_content")."""
oauth: OAuthRef | None = None
"""OAuth credential reference (do not store tokens here)."""

@field_validator("reasoning_key", mode="before")
@classmethod
def normalize_reasoning_key(cls, v: str | None) -> str | None:
if isinstance(v, str):
v = v.strip()
return v or None
return v

@field_serializer("api_key", when_used="json")
def dump_secret(self, v: SecretStr):
return v.get_secret_value()
Expand Down
1 change: 1 addition & 0 deletions src/kimi_cli/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,7 @@ def create_llm(
model=model.model,
base_url=provider.base_url,
api_key=resolved_api_key,
reasoning_key=provider.reasoning_key,
)
case "openai_responses":
from kosong.contrib.chat_provider.openai_responses import OpenAIResponses
Expand Down
42 changes: 42 additions & 0 deletions tests/core/test_create_llm.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
from __future__ import annotations

import pytest
from inline_snapshot import snapshot
from kosong.chat_provider.echo import EchoChatProvider
from kosong.chat_provider.kimi import Kimi
from kosong.contrib.chat_provider.openai_legacy import OpenAILegacy
from pydantic import SecretStr

from kimi_cli.config import LLMModel, LLMProvider
Expand Down Expand Up @@ -93,3 +95,43 @@ def test_create_llm_requires_base_url_for_kimi():
model = LLMModel(provider="kimi", model="kimi-base", max_context_size=4096)

assert create_llm(provider, model) is None


def test_create_llm_openai_legacy_passes_reasoning_key():
    """create_llm forwards the provider's reasoning_key to OpenAILegacy."""
    cfg = LLMProvider(
        type="openai_legacy",
        base_url="http://localhost:8000/v1",
        api_key=SecretStr("dummy"),
        reasoning_key="reasoning_content",
    )
    llm_model = LLMModel(provider="vllm", model="some-model", max_context_size=8192)

    result = create_llm(cfg, llm_model)

    assert result is not None
    chat_provider = result.chat_provider
    assert isinstance(chat_provider, OpenAILegacy)
    assert chat_provider._reasoning_key == "reasoning_content"


def test_create_llm_openai_legacy_reasoning_key_default_none():
    """When no reasoning_key is configured, OpenAILegacy receives None."""
    cfg = LLMProvider(
        type="openai_legacy",
        base_url="http://localhost:8000/v1",
        api_key=SecretStr("dummy"),
    )
    llm_model = LLMModel(provider="vllm", model="some-model", max_context_size=8192)

    result = create_llm(cfg, llm_model)

    assert result is not None
    chat_provider = result.chat_provider
    assert isinstance(chat_provider, OpenAILegacy)
    assert chat_provider._reasoning_key is None


@pytest.mark.parametrize("value", ["", " ", "\t\n"])
def test_reasoning_key_blank_normalized_to_none(value):
    """Blank or whitespace-only reasoning_key values are normalized to None."""
    cfg = LLMProvider(
        type="openai_legacy",
        base_url="http://localhost:8000/v1",
        api_key=SecretStr("dummy"),
        reasoning_key=value,
    )

    assert cfg.reasoning_key is None