|
3 | 3 | import mimetypes |
4 | 4 | import re |
5 | 5 | import logging |
| 6 | +import glob |
| 7 | +import platform |
6 | 8 | from reportportal_client.helpers import timestamp |
7 | 9 |
|
8 | 10 | logger = logging.getLogger(__name__) |
@@ -160,7 +162,127 @@ def extract_test_result_from_trajectory(trajectory_dir): |
160 | 162 | logger.error(f"Error extracting test result: {e}") |
161 | 163 | return False |
162 | 164 |
|
163 | | -def upload_test_results_to_rp(client, launch_id, test_path, trajectory_dir, force_stopped=False, video_path=None): |
def get_jan_log_paths(is_nightly=False):
    """
    Return glob patterns for Jan application log files for the current OS.

    Parameters
    ----------
    is_nightly : bool
        When True, look under the "Jan-nightly" install directory instead
        of the regular "Jan" one.

    Returns
    -------
    list[str]
        Glob patterns matching ``*.log`` files, or an empty list when the
        OS is unsupported (or ``APPDATA`` is unset on Windows).
    """
    system = platform.system().lower()
    app_name = "Jan-nightly" if is_nightly else "Jan"

    if system == "windows":
        # Windows: %APPDATA%\Jan(-nightly)\data\logs\*.log
        # os.path.expandvars("%APPDATA%") would silently return the literal
        # "%APPDATA%" if the variable is unset, yielding a bogus glob path —
        # check the environment explicitly and fail loudly instead.
        appdata = os.environ.get("APPDATA")
        if not appdata:
            logger.warning("APPDATA environment variable is not set")
            return []
        return [os.path.join(appdata, app_name, "data", "logs", "*.log")]

    if system == "darwin":  # macOS
        # macOS: ~/Library/Application Support/Jan(-nightly)/data/logs/*.log
        home_dir = os.path.expanduser("~")
        return [f"{home_dir}/Library/Application Support/{app_name}/data/logs/*.log"]

    if system == "linux":
        # Linux: ~/.local/share/Jan(-nightly)/data/logs/*.log
        home_dir = os.path.expanduser("~")
        return [f"{home_dir}/.local/share/{app_name}/data/logs/*.log"]

    logger.warning(f"Unsupported OS: {system}")
    return []
| 191 | + |
# 1 MB cap on uploaded log content; larger files are tail-truncated.
_MAX_LOG_SIZE = 1024 * 1024


def _read_log_tail(log_file, max_log_size=_MAX_LOG_SIZE):
    """
    Return (content, file_size) for a log file, loading at most the last
    ``max_log_size`` bytes.

    Reads in binary and seeks to the tail so that a huge log file is never
    fully loaded into memory; decoding uses errors='ignore' so a truncation
    point inside a multi-byte sequence is harmless.
    """
    file_size = os.path.getsize(log_file)
    with open(log_file, 'rb') as f:
        if file_size > max_log_size:
            f.seek(-max_log_size, os.SEEK_END)
            tail = f.read().decode('utf-8', errors='ignore')
            return (
                f"[LOG TRUNCATED - showing last {max_log_size} characters]\n\n{tail}",
                file_size,
            )
        return f.read().decode('utf-8', errors='ignore'), file_size


def _upload_single_jan_log(client, test_item_id, log_file, app_type, index, total):
    """Upload one Jan log file as a text attachment on the given RP item."""
    content, file_size = _read_log_tail(log_file)
    file_name = os.path.basename(log_file)

    logger.info("Uploading log file %d/%d: %s (%d bytes)", index, total, file_name, file_size)

    client.log(
        time=timestamp(),
        level="INFO",
        message=f"📝 Jan {app_type} application log: {file_name}",
        item_id=test_item_id,
        attachment={
            "name": f"jan_{app_type}_log_{index}_{file_name}",
            "data": content.encode('utf-8'),
            "mime": "text/plain"
        }
    )

    logger.info("Successfully uploaded log: %s", file_name)


def upload_jan_logs(client, test_item_id, is_nightly=False, max_log_files=5):
    """
    Upload the most recent Jan application log files to ReportPortal.

    Finds log files via get_jan_log_paths(), keeps the ``max_log_files``
    newest (by mtime), and attaches each (tail-truncated to 1 MB) to the
    given test item. Failures on individual files are logged and reported
    to RP but do not abort the remaining uploads.

    Parameters
    ----------
    client : ReportPortal client exposing ``log(...)``.
    test_item_id : RP item id the logs are attached to.
    is_nightly : bool
        Look in the Jan-nightly install's log directory instead of Jan's.
    max_log_files : int
        Maximum number of files to upload.
    """
    log_patterns = get_jan_log_paths(is_nightly)
    app_type = "nightly" if is_nightly else "regular"

    logger.info("Looking for Jan %s logs...", app_type)

    all_log_files = []
    for pattern in log_patterns:
        try:
            log_files = glob.glob(pattern)
            all_log_files.extend(log_files)
            logger.info("Found %d log files matching pattern: %s", len(log_files), pattern)
        except Exception as e:
            logger.error("Error searching for logs with pattern %s: %s", pattern, e)

    if not all_log_files:
        logger.warning("No Jan %s log files found", app_type)
        client.log(
            time=timestamp(),
            level="WARNING",
            message=f"📝 No Jan {app_type} application logs found",
            item_id=test_item_id
        )
        return

    try:
        # Sort by modification time (newest first) and limit to max_log_files
        all_log_files.sort(key=os.path.getmtime, reverse=True)
        log_files_to_upload = all_log_files[:max_log_files]

        logger.info("Uploading %d most recent Jan %s log files", len(log_files_to_upload), app_type)

        for i, log_file in enumerate(log_files_to_upload, 1):
            try:
                _upload_single_jan_log(client, test_item_id, log_file, app_type, i, len(log_files_to_upload))
            except Exception as e:
                # Per-file failure: report it on the RP item and keep going.
                logger.error("Error uploading log file %s: %s", log_file, e)
                client.log(
                    time=timestamp(),
                    level="ERROR",
                    message=f"Failed to upload log file {os.path.basename(log_file)}: {str(e)}",
                    item_id=test_item_id
                )

        # Add summary log
        client.log(
            time=timestamp(),
            level="INFO",
            message=f"📝 Uploaded {len(log_files_to_upload)} Jan {app_type} log files (total available: {len(all_log_files)})",
            item_id=test_item_id
        )

    except Exception as e:
        logger.error("Error processing Jan logs: %s", e)
        client.log(
            time=timestamp(),
            level="ERROR",
            message=f"Error processing Jan {app_type} logs: {str(e)}",
            item_id=test_item_id
        )
| 284 | + |
| 285 | +def upload_test_results_to_rp(client, launch_id, test_path, trajectory_dir, force_stopped=False, video_path=None, is_nightly=False): |
164 | 286 | """ |
165 | 287 | Upload test results to ReportPortal with proper status based on test result |
166 | 288 | """ |
@@ -281,6 +403,10 @@ def upload_test_results_to_rp(client, launch_id, test_path, trajectory_dir, forc |
281 | 403 | item_id=test_item_id |
282 | 404 | ) |
283 | 405 |
|
| 406 | + # Upload Jan application logs |
| 407 | + logger.info("Uploading Jan application logs...") |
| 408 | + upload_jan_logs(client, test_item_id, is_nightly=is_nightly, max_log_files=5) |
| 409 | + |
284 | 410 | # Upload all turn data with appropriate status |
285 | 411 | # If test failed, mark all turns as failed |
286 | 412 | force_fail_turns = (final_status == "FAILED") |
|
0 commit comments