import json
from pathlib import Path

from log_surgeon import Parser, PATTERN


# Custom delimiters can be specified, though the default is usually sufficient
parser = Parser(delimiters=r" \t\r\n:,!;%@/()[].")

# Step 1 - Timestamp pattern
# Note: these use plain r-strings, not rf-strings; in an f-string a bare {4}
# or {2} is consumed as a replacement field, silently turning \d{4} into \d4.
# Alternative for "MM/DD/YY HH:MM:SS"-style timestamps:
# parser.add_var("TS", r"(?<ts>\d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2})")

parser.add_var("TS", r"(?<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3})")
# Step 2 - Verbosity level
parser.add_var("LEVEL", rf"(?<level>(DEBUG)|(INFO)|(WARN)|(ERROR)|(FATAL))")

# Step 3 - Java stack traces and exceptions
parser.add_var(
    "SYSTEM_EXCEPTION",
    rf"(?<system_exception_type>({PATTERN.JAVA_PACKAGE_SEGMENT})+[{PATTERN.JAVA_IDENTIFIER_CHARSET}]*Exception): "
    rf"(?<system_exception_msg>{PATTERN.LOG_LINE})"
)
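# e.g. "java.io.IOException: Broken pipe" (illustrative sample)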
parser.add_var(
    "SYSTEM_STACK_TRACE", rf"\s{{1,4}}at (?<system_stack>{PATTERN.JAVA_STACK_LOCATION})"
)
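# e.g. "    at org.example.Foo.bar(Foo.java:42)" (illustrative frame)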

# Step 4 - CRITICAL: Add file path patterns BEFORE Java class patterns

# RESOURCE and resource patterns - capture the ENTIRE file:// URL
parser.add_var("RESOURCE_URL", rf"RESOURCE=(?<resource_url>file:{PATTERN.LOG_LINE_NO_WHITE_SPACE})")
parser.add_var("RESOURCE_LOWER", rf"resource=(?<resource_path>file:{PATTERN.LOG_LINE_NO_WHITE_SPACE})")

# HTTP Request/Response paths
parser.add_var("HTTP_REQUEST", rf"REQUEST (?<request_path>/{PATTERN.LOG_LINE_NO_WHITE_SPACE}) on")
parser.add_var("HTTP_RESPONSE", rf"RESPONSE (?<response_path>/{PATTERN.LOG_LINE_NO_WHITE_SPACE}) +(?<http_status>{PATTERN.INT})")

# TLD search patterns for JAR files
parser.add_var("TLD_SEARCH", rf"TLD search of (?<tld_path>file:{PATTERN.LOG_LINE_NO_WHITE_SPACE})")

# Java class with object ID (e.g., org.mortbay.jetty.HttpConnection@638a2c95)
# For fully qualified class names
parser.add_var("CLASS_NAME_OBJECT_ID", rf"(?<class_name>{PATTERN.JAVA_FULLY_QUALIFIED_CLASS_NAME})@(?<object_id>[0-9a-fA-F]+)")

# For simple class names with object ID (e.g., ServletHandler@5939a379)
parser.add_var("SIMPLE_CLASS_OBJECT_ID", rf"(?<simple_class>[A-Z][a-zA-Z0-9]+)@(?<obj_id>[0-9a-fA-F]+)")

# Exit codes
parser.add_var("EXIT_CODE", rf"exit code (?<exit_code>{PATTERN.INT})")

# HDFS URLs
parser.add_var("HDFS_URL", rf"hdfs://(?<hdfs_host>[a-zA-Z0-9\.\-]+):(?<hdfs_port>{PATTERN.PORT})")

# IP addresses and ports
parser.add_var("IP_PORT", rf"(?<ip>{PATTERN.IPV4}):(?<port>{PATTERN.PORT})")
parser.add_var("IP_ADDR", rf"(?<ip_address>{PATTERN.IPV4})")

# Host with IP pattern
parser.add_var("HOST_IP", rf"(?<host>[a-zA-Z0-9\-]+)/(?<host_ip>{PATTERN.IPV4})")

# UNIX signals
parser.add_var("UNIX_SIGNALS", rf"registered UNIX signal handlers for \[(?<signals>[A-Z, ]+)\]")

# Command line arguments
parser.add_var("ARGS_LIST", rf"args = \[(?<args>[\-a-zA-Z0-9, ]+)\]")

# Session IDs
parser.add_var("SESSION_ID", rf"Session ID (?<session_id>[a-zA-Z0-9]+)")

# MD5 hashes
parser.add_var("MD5_HASH", rf"MD5 (?<md5_hash>[0-9a-fA-F]{{32}})")

# JAR file names (more specific pattern)
parser.add_var("JAR_FILE", rf"(?<jar_file>{PATTERN.LINUX_FILE_NAME})\.jar")

# Version strings
parser.add_var("VERSION", rf"version = (?<version>[0-9\.\-a-zA-Z]+)")

# Build information
parser.add_var("COMMIT_HASH", rf"\-r (?<commit_hash>[a-f0-9]+)")
parser.add_var("COMPILED_BY", rf"compiled by '(?<compiled_by>[a-zA-Z0-9]+)'")
parser.add_var("COMPILE_DATE", rf"on (?<compile_date>[0-9T:Z]+)")

# Java version
parser.add_var("JAVA_VERSION", rf"java = (?<java_version>[0-9\._]+)")

# Configuration settings
parser.add_var("CONFIG_SETTING", rf"Setting (?<config_key>[a-zA-Z0-9\.\-_]+) to (?<config_value>[a-zA-Z0-9\.\-_:/]+)")

# Implementation class
parser.add_var("IMPL_CLASS", rf"impl=(?<impl_class>{PATTERN.JAVA_FULLY_QUALIFIED_CLASS_NAME})")

# Cache settings
parser.add_var("CACHE_TIMEOUT", rf"cacheTimeout=(?<cache_timeout>{PATTERN.INT})")
parser.add_var("WARNING_DELTA", rf"warningDeltaMs=(?<warning_delta>{PATTERN.INT})")

# Startup messages
parser.add_var("STARTUP_MSG", rf"STARTUP_MSG: (?<startup_msg>{PATTERN.LOG_LINE})")

# Memory metrics
parser.add_var("MEMORY_SIZE", rf"(?<memory_size>{PATTERN.INT}) bytes")
parser.add_var("CAPACITY_VALUE", rf"capacity = (?<capacity>{PATTERN.INT})")
parser.add_var("LOAD_FACTOR", rf"load factor = (?<load_factor>{PATTERN.FLOAT})")

# Thread info
parser.add_var("THREAD_INFO", rf"\[(?<thread_name>[^\]]+)\]")

# Boolean values
parser.add_var("BOOLEAN_VALUE", rf"is (?<bool_key>[a-zA-Z]+):(?<bool_value>(true)|(false))")

# Hadoop services
parser.add_var("HADOOP_SERVICE", rf"Hadoop:service=(?<service>[a-zA-Z]+),name=(?<name>[a-zA-Z]+)")

# Block IDs
parser.add_var("BLOCK_ID", rf"blk_(?<block_id>{PATTERN.INT})")
parser.add_var("BLOCK_POOL_ID", rf"BP\-(?<block_pool_id>[0-9\-]+)")

# DataNode patterns
parser.add_var("DATANODE_ID", rf"DatanodeInfoWithStorage\[(?<datanode_info>[^\]]+)\]")

# NameNode patterns
parser.add_var("NAMENODE_STATE", rf"STATE\* Safe mode is (?<safemode_state>(ON)|(OFF))")

# Percentages
parser.add_var("PERCENTAGE", rf"(?<percentage>{PATTERN.FLOAT})%")

# Time durations
parser.add_var("TIME_MS", rf"(?<time_value>{PATTERN.INT})ms")
parser.add_var("TIME_SEC", rf"(?<time_value>{PATTERN.INT})s")
parser.add_var("TIME_SECONDS", rf"(?<seconds>{PATTERN.INT}) seconds")

# Memory addresses
parser.add_var("MEMORY_ADDR", rf"0x(?<memory_addr>[0-9a-fA-F]+)")

# Network operations
parser.add_var("NETWORK_OP", rf"(?<operation>(RECEIVED)|(SENT)|(CONNECTED)|(DISCONNECTED))")

# File operations
parser.add_var("FILE_OP", rf"File operation (?<file_operation>(CREATE)|(DELETE)|(RENAME)|(APPEND)|(TRUNCATE))")

# Replication counts
parser.add_var("REPLICATIONS", rf"neededReplications = (?<needed>{PATTERN.INT}) pendingReplications = (?<pending>{PATTERN.INT})")

# User authentication
parser.add_var("USER_AUTH", rf"as:(?<username>[a-zA-Z0-9]+) \(auth:(?<auth>[A-Z]+)\)")

# Annotation patterns
parser.add_var("JAVA_ANNOTATION", rf"@(?<annotation>{PATTERN.JAVA_FULLY_QUALIFIED_CLASS_NAME})")

# Java class names - MUST COME AFTER file path patterns
parser.add_var("JAVA_CLASS", rf"(?<java_class>{PATTERN.JAVA_FULLY_QUALIFIED_CLASS_NAME})")
# Step 5 - Compile parser
parser.compile()
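
# Optional smoke test before the batch run. The sample line and the variables
# it should resolve are illustrative assumptions, not guaranteed matches:
try:
    _event = parser.parse_event(
        "2015-10-18T18:01:47,978 INFO [main] Retrying connect to 10.10.34.11:8032"
    )
    print("Smoke test vars:", _event.get_resolved_dict())
except Exception as exc:
    print(f"Smoke test failed to parse sample line: {exc}")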

# Directory containing unstructured log files
input_dir = "raw-logs/"
output_file = "parsed_logs.jsonl"

# Process all .log files in the directory
log_files = sorted(Path(input_dir).glob("*.log"))
total_files = len(log_files)
processed_lines = 0
failed_lines = 0

print(f"Found {total_files} log files to process")

with open(output_file, 'w', encoding='utf-8') as out_f:
    for file_idx, log_file in enumerate(log_files, 1):
        print(f"Processing [{file_idx}/{total_files}]: {log_file.name}")
        
        with open(log_file, 'r', encoding='utf-8', errors='ignore') as in_f:
            for line_num, line in enumerate(in_f, 1):
                line = line.rstrip('\n\r')
                if not line.strip():
                    continue
                
                try:
                    event = parser.parse_event(line)
                    
                    # Access extracted data
                    parsed_vars = event.get_resolved_dict()
                    result = {
                        "source_file": log_file.name,
                        "line_number": line_num,
                        "message": event.get_log_message().strip(),
                        "LogType": event.get_log_type().strip(),
                        "parsed-vars": parsed_vars,
                    }
                    if parsed_vars.get("ts"):
                        result["timestamp"] = parsed_vars["ts"]
                    
                    out_f.write(json.dumps(result) + '\n')
                    processed_lines += 1
                    
                except Exception as e:
                    # If parsing fails, still write the raw line for reference
                    failed_lines += 1
                    result = {
                        "source_file": log_file.name,
                        "line_number": line_num,
                        "message": line,
                        "LogType": "",
                        "parsed-vars": {},
                        "parse_error": str(e)
                    }
                    out_f.write(json.dumps(result) + '\n')

print(f"\nProcessing complete!")
print(f"  Processed lines: {processed_lines}")
print(f"  Failed lines: {failed_lines}")
print(f"  Results written to {output_file}")
