From 8e14a511cbef2781a3ef6d1c604659d5e6700316 Mon Sep 17 00:00:00 2001 From: Agampreet Singh Date: Mon, 10 Nov 2025 16:41:38 +0530 Subject: [PATCH] Add reasoning_effort parameter to Azure/OpenAI configs --- mem0/configs/llms/azure.py | 3 +++ mem0/configs/llms/openai.py | 4 ++++ mem0/llms/base.py | 2 ++ 3 files changed, 9 insertions(+) diff --git a/mem0/configs/llms/azure.py b/mem0/configs/llms/azure.py index f4eb859a21..1d9b564280 100644 --- a/mem0/configs/llms/azure.py +++ b/mem0/configs/llms/azure.py @@ -22,6 +22,7 @@ def __init__( enable_vision: bool = False, vision_details: Optional[str] = "auto", http_client_proxies: Optional[dict] = None, + reasoning_effort: Optional[str] = None, # Azure OpenAI-specific parameters azure_kwargs: Optional[Dict[str, Any]] = None, ): @@ -38,6 +39,7 @@ def __init__( enable_vision: Enable vision capabilities, defaults to False vision_details: Vision detail level, defaults to "auto" http_client_proxies: HTTP client proxy settings, defaults to None + reasoning_effort: Reasoning level (low, medium, high), defaults to None azure_kwargs: Azure-specific configuration, defaults to None """ # Initialize base parameters @@ -54,4 +56,5 @@ def __init__( ) # Azure OpenAI-specific parameters + self.reasoning_effort = reasoning_effort self.azure_kwargs = AzureConfig(**(azure_kwargs or {})) diff --git a/mem0/configs/llms/openai.py b/mem0/configs/llms/openai.py index e0a0a6f2d0..ccd712d814 100644 --- a/mem0/configs/llms/openai.py +++ b/mem0/configs/llms/openai.py @@ -21,6 +21,7 @@ def __init__( enable_vision: bool = False, vision_details: Optional[str] = "auto", http_client_proxies: Optional[dict] = None, + reasoning_effort: Optional[str] = None, # OpenAI-specific parameters openai_base_url: Optional[str] = None, models: Optional[List[str]] = None, @@ -45,12 +46,14 @@ def __init__( enable_vision: Enable vision capabilities, defaults to False vision_details: Vision detail level, defaults to "auto" http_client_proxies: HTTP client proxy settings, 
defaults to None + reasoning_effort: Reasoning level (low, medium, high), defaults to None openai_base_url: OpenAI API base URL, defaults to None models: List of models for OpenRouter, defaults to None route: OpenRouter route strategy, defaults to "fallback" openrouter_base_url: OpenRouter base URL, defaults to None site_url: Site URL for OpenRouter, defaults to None app_name: Application name for OpenRouter, defaults to None + store: Store conversation, defaults to False response_callback: Optional callback for monitoring LLM responses. """ # Initialize base parameters @@ -67,6 +70,7 @@ def __init__( ) # OpenAI-specific parameters + self.reasoning_effort = reasoning_effort self.openai_base_url = openai_base_url self.models = models self.route = route diff --git a/mem0/llms/base.py b/mem0/llms/base.py index 1212541026..fab9773912 100644 --- a/mem0/llms/base.py +++ b/mem0/llms/base.py @@ -88,6 +88,8 @@ def _get_supported_params(self, **kwargs) -> Dict: supported_params["tools"] = kwargs["tools"] if "tool_choice" in kwargs: supported_params["tool_choice"] = kwargs["tool_choice"] + if hasattr(self.config, "reasoning_effort") and self.config.reasoning_effort: + supported_params["reasoning_effort"] = self.config.reasoning_effort return supported_params else: