@@ -11,7 +11,7 @@
 from .__handlers import __send_metrics, __send_logs, __calculate_cost, __check
 
 # Decorator function to monitor chat completion
-def monitor(func, metrics_url, logs_url, metrics_username, logs_username, access_token, use_async=False):  # pylint: disable=too-many-arguments, line-too-long
+def monitor(func, metrics_url, logs_url, metrics_username, logs_username, access_token, use_async=False, disable_content=False, environment="default"):  # pylint: disable=too-many-arguments, line-too-long
     """
     A decorator function to monitor chat completions using the OpenAI API.
 
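For orientation, a minimal usage sketch of the extended signature; the module path, the `client` object, and the endpoint URLs are illustrative placeholders rather than part of this change:

    from grafana_openai_monitoring import chat_v2  # assumed module, matching the python_chatv2 source tag

    # Wrap the completion call; URLs, usernames, and token are placeholders.
    client.chat.completions.create = chat_v2.monitor(
        client.chat.completions.create,
        metrics_url="https://prometheus.example.net/api/prom",
        logs_url="https://loki.example.net/loki/api/v1/push",
        metrics_username="123456",
        logs_username="456789",
        access_token="glc_example",
        environment="staging",    # new: tags every metric and log stream
        disable_content=False,    # new: set True to keep prompts out of Loki
    )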
@@ -72,7 +72,8 @@ async def async_wrapper(*args, **kwargs):
                 {
                     "stream": {
                         "job": "integrations/openai",
                         "prompt": prompt,
+                        "environment": environment,
                         "model": response.model,
                         "role": response.choices[0].message.role,
                         "finish_reason": response.choices[0].finish_reason,
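With the new label, the Loki payload assembled around this hunk would look roughly as below; the "values" entries follow Loki's push-API shape and, like all concrete values here, are illustrative assumptions since they sit outside this hunk:

    logs = {
        "streams": [
            {
                "stream": {
                    "job": "integrations/openai",
                    "prompt": "What is the capital of France?",
                    "environment": "staging",  # new stream label
                    "model": "gpt-4",
                    "role": "assistant",
                    "finish_reason": "stop",
                },
                # timestamp/line pairs; assumed, not shown in this hunk
                "values": [["1700000000000000000", "completion logged"]],
            }
        ]
    }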
@@ -90,39 +91,39 @@ async def async_wrapper(*args, **kwargs):
                 }
             ]
         }
-
-        # Send logs to the specified logs URL
-        __send_logs(logs_url=logs_url,
-                    logs_username=logs_username,
-                    access_token=access_token,
-                    logs=logs
-                    )
+        if disable_content is False:
+            # Send logs to the specified logs URL unless content logging is disabled
+            __send_logs(logs_url=logs_url,
+                        logs_username=logs_username,
+                        access_token=access_token,
+                        logs=logs
+                        )
 
         # Prepare metrics to be sent
         metrics = [
             # Metric to track the number of completion tokens used in the response
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'completionTokens={response.usage.completion_tokens}',
 
             # Metric to track the number of prompt tokens used in the response
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'promptTokens={response.usage.prompt_tokens}',
 
             # Metric to track the total number of tokens used in the response
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'totalTokens={response.usage.total_tokens}',
 
             # Metric to track the usage cost based on the model and token usage
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'usageCost={cost}',
 
             # Metric to track the duration of the API request and response cycle
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'requestDuration={duration}',
         ]
 
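Rendered, each entry is an influx-style line in which environment now travels as a tag next to model, ahead of the single field; an illustrative example with made-up values:

    openai,job=integrations/openai,source=python_chatv2,model=gpt-4,environment=staging completionTokens=42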
@@ -168,7 +169,8 @@ def wrapper(*args, **kwargs):
                 {
                     "stream": {
                         "job": "integrations/openai",
                         "prompt": prompt,
+                        "environment": environment,
                         "model": response.model,
                         "role": response.choices[0].message.role,
                         "finish_reason": response.choices[0].finish_reason,
@@ -186,39 +188,39 @@ def wrapper(*args, **kwargs):
                 }
             ]
         }
-
-        # Send logs to the specified logs URL
-        __send_logs(logs_url=logs_url,
-                    logs_username=logs_username,
-                    access_token=access_token,
-                    logs=logs
-                    )
+        if disable_content is False:
+            # Send logs to the specified logs URL unless content logging is disabled
+            __send_logs(logs_url=logs_url,
+                        logs_username=logs_username,
+                        access_token=access_token,
+                        logs=logs
+                        )
 
         # Prepare metrics to be sent
         metrics = [
             # Metric to track the number of completion tokens used in the response
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'completionTokens={response.usage.completion_tokens}',
 
             # Metric to track the number of prompt tokens used in the response
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'promptTokens={response.usage.prompt_tokens}',
 
             # Metric to track the total number of tokens used in the response
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'totalTokens={response.usage.total_tokens}',
 
             # Metric to track the usage cost based on the model and token usage
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'usageCost={cost}',
 
             # Metric to track the duration of the API request and response cycle
             f'openai,job=integrations/openai,'
-            f'source=python_chatv2,model={response.model} '
+            f'source=python_chatv2,model={response.model},environment={environment} '
             f'requestDuration={duration}',
         ]
 
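Which wrapper the caller receives is presumably decided at the tail of monitor() from use_async; that dispatch sits outside this diff, but a plausible sketch is:

    # Hypothetical tail of monitor(); the real dispatch is not shown in this diff.
    if use_async:
        return async_wrapper
    return wrapper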