44# SPDX-License-Identifier: BSD 2-Clause License
55#
66
7+ import asyncio
78import os
89
910from dotenv import load_dotenv
1011from loguru import logger
12+ from pipecat .adapters .schemas .function_schema import FunctionSchema
13+ from pipecat .adapters .schemas .tools_schema import ToolsSchema
1114from pipecat .audio .turn .smart_turn .local_smart_turn_v3 import LocalSmartTurnAnalyzerV3
1215from pipecat .audio .vad .silero import SileroVADAnalyzer
1316from pipecat .audio .vad .vad_analyzer import VADParams
14- from pipecat .frames .frames import LLMRunFrame
17+ from pipecat .frames .frames import LLMRunFrame , TTSSpeakFrame
1518from pipecat .pipeline .pipeline import Pipeline
1619from pipecat .pipeline .runner import PipelineRunner
1720from pipecat .pipeline .task import PipelineParams , PipelineTask
2023 LLMContextAggregatorPair ,
2124 LLMUserAggregatorParams ,
2225)
26+ from pipecat .processors .frameworks .rtvi import RTVIFunctionCallReportLevel , RTVIObserverParams
2327from pipecat .runner .types import RunnerArguments
2428from pipecat .runner .utils import create_transport
2529from pipecat .services .cartesia .tts import CartesiaTTSService
2630from pipecat .services .deepgram .stt import DeepgramSTTService
27- from pipecat .services .deepgram . tts import DeepgramTTSService
31+ from pipecat .services .llm_service import FunctionCallParams
2832from pipecat .services .openai .llm import OpenAILLMService
2933from pipecat .transports .base_transport import BaseTransport , TransportParams
3034from pipecat .turns .user_stop import TurnAnalyzerUserTurnStopStrategy
3135from pipecat .turns .user_turn_strategies import UserTurnStrategies
3236
3337load_dotenv (override = True )
3438
39+
async def fetch_weather_from_api(params: FunctionCallParams):
    """Mock weather tool handler: pause briefly, then report fixed conditions.

    Registered with the LLM under the name ``get_current_weather``. The
    one-second sleep simulates the latency of a real weather API call; the
    result is delivered through ``params.result_callback`` as the tool output.
    """
    # Simulated network round-trip for the demo.
    await asyncio.sleep(1)
    report = {"conditions": "nice", "temperature": "75"}
    await params.result_callback(report)
43+
44+
async def fetch_restaurant_recommendation(params: FunctionCallParams):
    """Mock restaurant tool handler: immediately recommend a fixed venue.

    Registered with the LLM under the name ``get_restaurant_recommendation``;
    the canned result is delivered via ``params.result_callback``.
    """
    recommendation = {"name": "The Golden Dragon"}
    await params.result_callback(recommendation)
47+
48+
3549# We use lambdas to defer transport parameter creation until the transport
3650# type is selected at runtime.
3751transport_params = {
@@ -51,26 +65,56 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):
5165 api_key = os .getenv ("CARTESIA_API_KEY" ),
5266 voice_id = "71a7ad14-091c-4e8e-a314-022ece01c121" , # British Reading Lady
5367 )
54- # tts = DeepgramTTSService(api_key=os.getenv("DEEPGRAM_API_KEY"), voice="aura-2-andromeda-en")
5568
5669 llm = OpenAILLMService (api_key = os .getenv ("OPENAI_API_KEY" ))
5770
71+ llm .register_function ("get_current_weather" , fetch_weather_from_api )
72+ llm .register_function ("get_restaurant_recommendation" , fetch_restaurant_recommendation )
73+
74+ @llm .event_handler ("on_function_calls_started" )
75+ async def on_function_calls_started (service , function_calls ):
76+ await tts .queue_frame (TTSSpeakFrame ("Let me check on that." ))
77+
78+ weather_function = FunctionSchema (
79+ name = "get_current_weather" ,
80+ description = "Get the current weather" ,
81+ properties = {
82+ "location" : {
83+ "type" : "string" ,
84+ "description" : "The city and state, e.g. San Francisco, CA" ,
85+ },
86+ "format" : {
87+ "type" : "string" ,
88+ "enum" : ["celsius" , "fahrenheit" ],
89+ "description" : "The temperature unit to use. Infer this from the user's location." ,
90+ },
91+ },
92+ required = ["location" , "format" ],
93+ )
94+ restaurant_function = FunctionSchema (
95+ name = "get_restaurant_recommendation" ,
96+ description = "Get a restaurant recommendation" ,
97+ properties = {
98+ "location" : {
99+ "type" : "string" ,
100+ "description" : "The city and state, e.g. San Francisco, CA" ,
101+ },
102+ },
103+ required = ["location" ],
104+ )
105+ tools = ToolsSchema (standard_tools = [weather_function , restaurant_function ])
106+
58107 messages = [
59108 {
60109 "role" : "system" ,
61110 "content" : "You are a helpful LLM in a WebRTC call. Your goal is to demonstrate your capabilities in a succinct way. Your output will be spoken aloud, so avoid special characters that can't easily be spoken, such as emojis or bullet points. Respond to what the user said in a creative and helpful way." ,
62111 },
63112 ]
64113
65- context = LLMContext (messages )
114+ context = LLMContext (messages , tools )
66115 user_aggregator , assistant_aggregator = LLMContextAggregatorPair (
67116 context ,
68- user_params = LLMUserAggregatorParams (
69- user_turn_strategies = UserTurnStrategies (
70- stop = [TurnAnalyzerUserTurnStopStrategy (turn_analyzer = LocalSmartTurnAnalyzerV3 ())]
71- ),
72- vad_analyzer = SileroVADAnalyzer (params = VADParams (stop_secs = 0.2 )),
73- ),
117+ user_params = LLMUserAggregatorParams (vad_analyzer = SileroVADAnalyzer ()),
74118 )
75119
76120 pipeline = Pipeline (
@@ -92,6 +136,9 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):
92136 enable_usage_metrics = True ,
93137 ),
94138 idle_timeout_secs = runner_args .pipeline_idle_timeout_secs ,
139+ rtvi_observer_params = RTVIObserverParams (
140+ function_call_report_level = {"*" : RTVIFunctionCallReportLevel .FULL }
141+ ),
95142 )
96143
97144 @task .rtvi .event_handler ("on_client_ready" )
0 commit comments