
Commit 113889f

feat: add reasoning_effort parameter to LiteLLMAPIBackend and LLMSettings (microsoft#754)
* feat: Add reasoning_effort parameter to LiteLLMAPIBackend and LLMSettings
* style: Use consistent quotation marks for reasoning_effort literals
1 parent 6bfc1e5 commit 113889f

2 files changed: 4 additions, 0 deletions


rdagent/oai/backend/litellm.py

Lines changed: 1 addition & 0 deletions
@@ -88,6 +88,7 @@ def _create_chat_completion_inner_function( # type: ignore[no-untyped-def] # no
             stream=LITELLM_SETTINGS.chat_stream,
             temperature=LITELLM_SETTINGS.chat_temperature,
             max_tokens=LITELLM_SETTINGS.chat_max_tokens,
+            reasoning_effort=LITELLM_SETTINGS.reasoning_effort,
             **kwargs,
         )
         logger.info(
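
For context, a minimal sketch of the call site after this change, assuming the surrounding code is a standard litellm.completion(...) invocation (the full call is not shown in this hunk) and that LITELLM_SETTINGS is the loaded settings instance exposed by this module; when the setting is left at None it is simply forwarded and has no effect for models without reasoning support.

import litellm

from rdagent.oai.backend.litellm import LITELLM_SETTINGS  # assumed import path

response = litellm.completion(
    model=LITELLM_SETTINGS.chat_model,
    messages=[{"role": "user", "content": "Hello"}],
    stream=LITELLM_SETTINGS.chat_stream,
    temperature=LITELLM_SETTINGS.chat_temperature,
    max_tokens=LITELLM_SETTINGS.chat_max_tokens,
    # New in this commit: pass the configured effort level
    # ("low" / "medium" / "high", or None) straight through to LiteLLM.
    reasoning_effort=LITELLM_SETTINGS.reasoning_effort,
)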

rdagent/oai/llm_conf.py

Lines changed: 3 additions & 0 deletions
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from pathlib import Path
+from typing import Literal
 
 from pydantic import Field
 
@@ -14,6 +15,8 @@ class LLMSettings(ExtendedBaseSettings):
     chat_model: str = "gpt-4-turbo"
     embedding_model: str = "text-embedding-3-small"
 
+    reasoning_effort: Literal["low", "medium", "high"] | None = None
+
     # TODO: most of the settings are only used on deprec.DeprecBackend.
     # So they should move the settings to that folder.
 
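
As a quick illustration of the new setting, a hypothetical usage sketch follows. It assumes LLMSettings can be instantiated with keyword overrides, as is usual for pydantic settings classes, and that the remaining fields either have defaults or are supplied via the environment; in practice RD-Agent loads these values from its configuration / .env.

from rdagent.oai.llm_conf import LLMSettings

# Explicit override; the value is validated against
# Literal["low", "medium", "high"] | None, so typos fail fast.
settings = LLMSettings(reasoning_effort="high")
assert settings.reasoning_effort == "high"

# Left unset, the field defaults to None, so existing configurations and
# non-reasoning models behave exactly as before this commit.
assert LLMSettings().reasoning_effort is None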
