// AirLibrary/Logging/mod.rs
1//! # Structured Logging Module
2//!
3//! Provides comprehensive structured logging with JSON output, request ID
4//! propagation, context-aware logging, log rotation, sensitive data filtering,
5//! and validation.
6//!
7//! ## Responsibilities
8//!
9//! ### Structured Logging
10//! - JSON output format for machine parsing and analysis
11//! - Request ID and trace ID propagation across log entries
12//! - Context-aware logging with operation tracking
13//! - Log level filtering (TRACE, DEBUG, INFO, WARN, ERROR)
14//!
15//! ### Log Rotation
16//! - Size-based log rotation to prevent disk exhaustion
17//! - Time-based rotation (daily) for archival
18//! - Automatic cleanup of old log files
19//! - Compressed log file storage for space efficiency
20//!
21//! ### Context Management
22//! - Thread-local context storage for async operations
23//! - Automatic context propagation across await points
24//! - Correlation ID linking distributed requests
25//! - User and session tracking
26//!
27//! ### Sensitive Data Handling
28//! - Automatic redaction of sensitive fields
29//! - Configurable sensitive patterns
30//! - Sanitization of error messages
31//! - Audit logging for security events
32//!
33//! ### Log Validation
34//! - Structured log data validation before output
35//! - Schema enforcement for consistent format
36//! - Size limits on log messages
37//! - Malformed log rejection
38//!
39//! ## Integration with Mountain
40//!
41//! Logs flow to Mountain's debugging infrastructure:
42//! - Real-time log streaming to debug console
43//! - Historical log search and filtering
44//! - Error aggregation and alerting
45//! - Performance profiling logs
46//!
47//! ## VSCode Debugging References
48//!
49//! Similar logging patterns used in VSCode for:
50//! - Exception and error tracking
51//! - Debug output for extension development
52//! - Performance profiling traces
53//! - Cross-process communication logging
54//!
55//! Reference:
56//! vs/base/common/errors
57//!
58//! # FUTURE Enhancements
59//!
60//! - [DISTRIBUTED TRACING] Tighter integration with Tracing module
61//! - `ELASTICSEARCH`: Direct log export to Elasticsearch/Logstash
62//! - [LOG ANALYSIS] Automatic anomaly detection in logs
63//! - `KIBANA`: Pre-built Kibana dashboards
//! - [LOG PARSING] Support for custom log formats
//!
//! ## Sensitive Data Handling
66//!
67//! All logs are automatically sanitized:
68//! - Passwords, tokens, and secrets are redacted
69//! - User-identifiable information is masked
70//! - API keys and secrets are removed
71//! - Error messages are parsed for sensitive patterns
72
73use std::{
74	collections::HashMap,
75	path::{Path, PathBuf},
76	sync::{Arc, Mutex},
77	time::{SystemTime, UNIX_EPOCH},
78};
79
80use serde::{Deserialize, Serialize};
81use tracing_subscriber::{fmt::format::FmtSpan, prelude::*};
82use tracing_appender::rolling::Rotation;
83
84use crate::{Result, dev_log};
85
/// Configuration for log rotation and management.
///
/// Checked by [`LogRotationConfig::Validate`]; the size limit is consumed by
/// `LogManager`'s size-based rotation, while `Rotation` is translated to the
/// `tracing_appender` strategy via `ToTracingRotation`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogRotationConfig {
	/// Maximum size of a single log file in bytes before rotation
	/// (Validate requires > 0 and <= 10 GB)
	pub MaxFileSizeBytes:u64,

	/// Maximum number of rotated log files to retain
	/// (Validate requires > 0 and <= 365)
	pub MaxFiles:usize,

	/// Rotation strategy (daily, hourly, never)
	pub Rotation:LogRotation,

	/// Whether to compress rotated log files
	pub Compress:bool,

	/// Log directory path
	pub LogDirectory:String,

	/// Log file name prefix
	pub LogFilePrefix:String,
}
107
/// Log rotation strategies, mapped onto `tracing_appender` rotation
/// by [`LogRotationConfig::ToTracingRotation`].
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum LogRotation {
	/// Rotate daily
	Daily,

	/// Rotate every hour
	Hourly,

	/// Rotate every minute (for debugging)
	Minutely,

	/// Never rotate automatically
	Never,
}
123
124impl Default for LogRotation {
125	fn default() -> Self { Self::Daily }
126}
127
128impl Default for LogRotationConfig {
129	fn default() -> Self {
130		Self {
131			MaxFileSizeBytes:100 * 1024 * 1024, // 100 MB
132			MaxFiles:30,                        // Keep 30 days of logs
133			Rotation:LogRotation::Daily,
134
135			Compress:true,
136
137			LogDirectory:"./Log".to_string(),
138
139			LogFilePrefix:"Air".to_string(),
140		}
141	}
142}
143
144impl LogRotationConfig {
145	/// Validate log rotation configuration
146	pub fn Validate(&self) -> Result<()> {
147		if self.MaxFileSizeBytes == 0 {
148			return Err("MaxFileSizeBytes must be greater than 0".into());
149		}
150
151		if self.MaxFileSizeBytes > 10 * 1024 * 1024 * 1024 {
152			// Max 10 GB
153			return Err("MaxFileSizeBytes cannot exceed 10 GB".into());
154		}
155
156		if self.MaxFiles == 0 {
157			return Err("MaxFiles must be greater than 0".into());
158		}
159
160		if self.MaxFiles > 365 {
161			// Max 1 year retention
162			return Err("MaxFiles cannot exceed 365".into());
163		}
164
165		Ok(())
166	}
167
168	/// Convert to tracing_appender Rotation
169	pub fn ToTracingRotation(&self) -> Rotation {
170		match self.Rotation {
171			LogRotation::Daily => Rotation::DAILY,
172
173			LogRotation::Hourly => Rotation::HOURLY,
174
175			LogRotation::Minutely => Rotation::NEVER, // No minutely support
176			LogRotation::Never => Rotation::NEVER,
177		}
178	}
179}
180
/// Sensitive data patterns for redaction; consumed by
/// `SensitiveDataFilter::new`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SensitiveDataConfig {
	/// Enable automatic sensitive data redaction
	pub Enabled:bool,

	/// Custom patterns to redact (regex); invalid patterns are logged
	/// and skipped at filter-construction time
	pub CustomPatterns:Vec<String>,

	/// Standard patterns to include (password, token, secret, etc.)
	pub IncludeStandardPatterns:bool,
}
193
194impl Default for SensitiveDataConfig {
195	fn default() -> Self { Self { Enabled:true, CustomPatterns:Vec::new(), IncludeStandardPatterns:true } }
196}
197
/// Context for structured logging with request IDs and metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogContext {
	// Unique ID for the current request (generated via crate::Utility::GenerateRequestId).
	pub RequestId:String,

	// Trace ID used to correlate entries (generated the same way as RequestId).
	pub TraceId:String,

	// Span ID for the current unit of work (UUID v4 string).
	pub SpanId:String,

	// Optional user identifier attached to the context.
	pub UserId:Option<String>,

	// Optional session identifier attached to the context.
	pub SessionId:Option<String>,

	// Name of the operation being performed; must be non-empty per Validate.
	pub Operation:String,

	// Arbitrary key/value metadata carried alongside the IDs.
	pub Metadata:HashMap<String, String>,
}
215
216impl LogContext {
217	/// Create a new log context
218	pub fn New(Operation:impl Into<String>) -> Self {
219		let RequestId = crate::Utility::GenerateRequestId();
220
221		let TraceId = crate::Utility::GenerateRequestId();
222
223		let SpanId = uuid::Uuid::new_v4().to_string();
224
225		Self {
226			RequestId,
227
228			TraceId,
229
230			SpanId,
231
232			UserId:None,
233
234			SessionId:None,
235
236			Operation:Operation.into(),
237
238			Metadata:HashMap::new(),
239		}
240	}
241
242	/// Validate log context for required fields
243	pub fn Validate(&self) -> Result<()> {
244		if self.RequestId.is_empty() {
245			return Err("RequestId cannot be empty".into());
246		}
247
248		if self.TraceId.is_empty() {
249			return Err("TraceId cannot be empty".into());
250		}
251
252		if self.Operation.is_empty() {
253			return Err("Operation cannot be empty".into());
254		}
255
256		Ok(())
257	}
258
259	/// Set user ID in context
260	pub fn WithUserId(mut self, UserId:String) -> Self {
261		self.UserId = Some(UserId);
262
263		self
264	}
265
266	/// Set session ID in context
267	pub fn WithSessionId(mut self, SessionId:String) -> Self {
268		self.SessionId = Some(SessionId);
269
270		self
271	}
272
273	/// Add metadata to context
274	pub fn WithMetadata(mut self, Key:String, Value:String) -> Self {
275		self.Metadata.insert(Key, Value);
276
277		self
278	}
279
280	/// Add multiple metadata entries
281	pub fn WithMetadataMap(mut self, Metadata:HashMap<String, String>) -> Self {
282		self.Metadata.extend(Metadata);
283
284		self
285	}
286}
287
thread_local! {

	// Per-thread slot holding the active LogContext (None until SetLogContext
	// runs on this thread). NOTE(review): being thread-local, the context does
	// not automatically follow async tasks that migrate between executor
	// threads — confirm callers re-install it after spawn/await boundaries.
	static LOG_CONTEXT: std::cell::RefCell<Option<LogContext>> = std::cell::RefCell::new(None);
}
292
293/// Set the log context for the current thread
294pub fn SetLogContext(Context:LogContext) {
295	if let Err(e) = Context.Validate() {
296		dev_log!("air", "error: [Logging] Invalid log context provided: {:?}", e);
297
298		return;
299	}
300
301	LOG_CONTEXT.with(|ctx| {
302		*ctx.borrow_mut() = Some(Context);
303	});
304}
305
306/// Get the current log context
307pub fn GetLogContext() -> Option<LogContext> { LOG_CONTEXT.with(|Context| Context.borrow().clone()) }
308
309/// Clear the log context for the current thread
310pub fn ClearLogContext() {
311	LOG_CONTEXT.with(|Context| {
312		*Context.borrow_mut() = None;
313	});
314}
315
/// Log file manager for size-based rotation and cleanup of old files.
#[allow(dead_code)]
pub struct LogManager {
	// Validated rotation configuration (see LogRotationConfig::Validate).
	Config:LogRotationConfig,

	// Path of the log file currently being written, if any.
	CurrentFile:Arc<Mutex<Option<PathBuf>>>,

	// Running byte count of the current file; compared against
	// Config.MaxFileSizeBytes by ShouldRotate and reset to 0 by Rotate.
	CurrentSize:Arc<Mutex<u64>>,
}
325
326impl LogManager {
327	#[allow(dead_code)]
328	fn new(Config:LogRotationConfig) -> Result<Self> {
329		Config.Validate()?;
330
331		// Ensure log directory exists
332		std::fs::create_dir_all(&Config.LogDirectory)?;
333
334		Ok(Self {
335			Config,
336			CurrentFile:Arc::new(Mutex::new(None)),
337			CurrentSize:Arc::new(Mutex::new(0)),
338		})
339	}
340
341	/// Check if log rotation is needed
342	#[allow(dead_code)]
343	fn ShouldRotate(&self) -> bool {
344		let size = *self.CurrentSize.lock().unwrap();
345
346		size >= self.Config.MaxFileSizeBytes
347	}
348
349	/// Perform log rotation
350	#[allow(dead_code)]
351	fn Rotate(&self) -> Result<()> {
352		let CurrentFile = self.CurrentFile.lock().unwrap();
353
354		if let Some(ref FilePath) = *CurrentFile {
355			// Rename current file with timestamp
356			let Timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
357
358			let RotatedPath = format!("{}.{}.log", FilePath.display(), Timestamp);
359
360			std::fs::rename(FilePath, &RotatedPath)?;
361
362			// Compress if enabled
363			if self.Config.Compress {
364				self.CompressFile(&RotatedPath)?;
365			}
366
367			// Cleanup old log files
368			self.CleanupOldLogs()?;
369		}
370
371		*self.CurrentSize.lock().unwrap() = 0;
372
373		Ok(())
374	}
375
376	/// Compress a log file
377	#[allow(dead_code)]
378	fn CompressFile(&self, path:&str) -> crate::Result<()> {
379		// Basic compression - in production would use actual compression
380		let _ = path;
381
382		Ok(())
383	}
384
385	/// Cleanup old log files
386	#[allow(dead_code)]
387	fn CleanupOldLogs(&self) -> Result<()> {
388		let log_dir = Path::new(&self.Config.LogDirectory);
389
390		if !log_dir.exists() {
391			return Ok(());
392		}
393
394		let mut log_files:Vec<_> = std::fs::read_dir(log_dir)?
395			.filter_map(|e| e.ok())
396			.filter(|e| {
397				e.path()
398					.extension()
399					.and_then(|s| s.to_str())
400					.map(|ext| ext == "log")
401					.unwrap_or(false)
402			})
403			.collect();
404
405		// Sort by modification time (newest first)
406		log_files.sort_by(|a, b| {
407			let a_time = a.metadata().and_then(|m| m.modified()).unwrap_or(UNIX_EPOCH);
408			let b_time = b.metadata().and_then(|m| m.modified()).unwrap_or(UNIX_EPOCH);
409			b_time.cmp(&a_time)
410		});
411
412		// Keep only max_files
413		for file in log_files.into_iter().skip(self.Config.MaxFiles) {
414			let _ = std::fs::remove_file(file.path());
415		}
416
417		Ok(())
418	}
419}
420
/// Sensitive data filter for log sanitization.
#[derive(Debug, Clone)]
pub struct SensitiveDataFilter {
	// When false, Filter() returns input unchanged.
	enabled:bool,

	// Compiled redaction patterns; every match is replaced with "[REDACTED]".
	patterns:Vec<regex::Regex>,
}
428
429impl Default for SensitiveDataFilter {
430	fn default() -> Self {
431		let mut patterns = Vec::new();
432
433		// Standard sensitive patterns - simplified to avoid escaping issues
434		patterns.push(regex::Regex::new(r"(?i)password[=[:space:]]+\S+").unwrap());
435
436		patterns.push(regex::Regex::new(r"(?i)token[=[:space:]]+\S+").unwrap());
437
438		patterns.push(regex::Regex::new(r"(?i)secret[=[:space:]]+\S+").unwrap());
439
440		patterns.push(regex::Regex::new(r"(?i)(api|private)[_-]?key[=[:space:]]+\S+").unwrap());
441
442		patterns.push(regex::Regex::new(r"(?i)authorization[=[:space:]]+Bearer[[:space:]]+\S+").unwrap());
443
444		patterns.push(regex::Regex::new(r"(?i)credential[=[:space:]]+\S+").unwrap());
445
446		Self { enabled:true, patterns }
447	}
448}
449
450impl SensitiveDataFilter {
451	fn new(Config:SensitiveDataConfig) -> Result<Self> {
452		let mut filter = Self::default();
453
454		filter.enabled = Config.Enabled;
455
456		if !Config.IncludeStandardPatterns {
457			filter.patterns.clear();
458		}
459
460		// Add custom patterns
461		for pattern in &Config.CustomPatterns {
462			match regex::Regex::new(pattern) {
463				Ok(re) => filter.patterns.push(re),
464
465				Err(e) => dev_log!("air", "warn: [Logging] Failed to compile custom regex '{}': {}", pattern, e),
466			}
467		}
468
469		Ok(filter)
470	}
471
472	/// Filter sensitive data from a string
473	fn Filter(&self, input:&str) -> String {
474		if !self.enabled {
475			return input.to_string();
476		}
477
478		let mut filtered = input.to_string();
479
480		for pattern in &self.patterns {
481			filtered = pattern.replace_all(&filtered, "[REDACTED]").to_string();
482		}
483
484		filtered
485	}
486}
487
/// Structured log entry for validation before output.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StructuredLogEntry {
	// Entry timestamp; NOTE(review): unit (seconds vs millis since epoch) is
	// not established by this file — confirm against the producer.
	pub Timestamp:u64,

	// One of "TRACE", "DEBUG", "INFO", "WARN", "ERROR" (enforced by Validate).
	pub Level:String,

	// Log message; non-empty and at most 10000 bytes (enforced by Validate).
	pub Message:String,

	// Optional correlation IDs mirroring LogContext.
	pub RequestId:Option<String>,

	pub TraceId:Option<String>,

	pub SpanId:Option<String>,

	// Optional operation name from the originating context.
	pub Operation:Option<String>,

	pub UserId:Option<String>,

	// Arbitrary key/value metadata.
	pub Metadata:HashMap<String, String>,
}
509
510impl StructuredLogEntry {
511	/// Validate log entry structure
512	pub fn Validate(&self) -> Result<()> {
513		if self.Level.is_empty() {
514			return Err("log level cannot be empty".into());
515		}
516
517		if self.Message.is_empty() {
518			return Err("log message cannot be empty".into());
519		}
520
521		if !["TRACE", "DEBUG", "INFO", "WARN", "ERROR"].contains(&self.Level.as_str()) {
522			return Err(format!("invalid log level: {}", self.Level).into());
523		}
524
525		if self.Message.len() > 10000 {
526			// Max 10KB message
527			return Err("log message too large".into());
528		}
529
530		Ok(())
531	}
532}
533
/// Context-aware logger for structured logging.
#[derive(Debug, Clone)]
pub struct ContextLogger {
	// When true, Initialize installs JSON-formatted tracing layers.
	json_output:bool,

	// Optional path of the log file; parent dir + file name are derived
	// from it during Initialize.
	log_file_path:Option<String>,

	// Rotation settings supplied via WithRotation.
	#[allow(dead_code)]
	rotation_config:Option<LogRotationConfig>,

	// Filter applied to every message before it is logged.
	sensitive_filter:Arc<SensitiveDataFilter>,

	// Guards against initializing the tracing subscriber twice.
	initialized:Arc<Mutex<bool>>,
}
548
549impl ContextLogger {
550	/// Create a new context logger
551	pub fn New(json_output:bool, log_file_path:Option<String>) -> Self {
552		Self {
553			json_output,
554
555			log_file_path,
556
557			rotation_config:None,
558
559			sensitive_filter:Arc::new(SensitiveDataFilter::default()),
560
561			initialized:Arc::new(Mutex::new(false)),
562		}
563	}
564
565	/// Create with log rotation configuration
566	pub fn WithRotation(
567		json_output:bool,
568
569		log_file_path:Option<String>,
570
571		rotation_config:LogRotationConfig,
572	) -> Result<Self> {
573		rotation_config.Validate()?;
574
575		Ok(Self {
576			json_output,
577			log_file_path,
578			rotation_config:Some(rotation_config),
579			sensitive_filter:Arc::new(SensitiveDataFilter::default()),
580			initialized:Arc::new(Mutex::new(false)),
581		})
582	}
583
584	/// Set sensitive data filter configuration
585	pub fn WithSensitiveFilter(mut self, Config:SensitiveDataConfig) -> Result<Self> {
586		self.sensitive_filter = Arc::new(SensitiveDataFilter::new(Config)?);
587
588		Ok(self)
589	}
590
591	/// Initialize the logging system with tracing
592	pub fn Initialize(&self) -> Result<()> {
593		// Check if already initialized
594		let mut initialized = self.initialized.lock().unwrap();
595
596		if *initialized {
597			return Ok(());
598		}
599
600		let filter = tracing_subscriber::EnvFilter::from_default_env()
601			.add_directive(tracing_subscriber::filter::LevelFilter::INFO.into());
602
603		if self.json_output {
604			// JSON output format
605			let fmt_layer = tracing_subscriber::fmt::layer()
606				.json()
607				.with_current_span(true)
608				.with_span_list(false)
609				.with_target(true)
610				.with_file(true)
611				.with_line_number(true)
612				.with_writer(std::io::stderr)
613				.with_ansi(false)
614				.with_span_events(FmtSpan::FULL);
615
616			let registry = tracing_subscriber::registry().with(filter).with(fmt_layer);
617
618			// Set up log file if specified
619			if let Some(ref log_path) = self.log_file_path {
620				let log_dir = std::path::Path::new(log_path).parent().unwrap_or(std::path::Path::new("."));
621
622				let log_file = std::path::Path::new(log_path)
623					.file_name()
624					.unwrap_or(std::ffi::OsStr::new("Air.log"));
625
626				let file_appender = tracing_appender::rolling::daily(log_dir, log_file);
627
628				let (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);
629
630				let file_layer = tracing_subscriber::fmt::layer()
631					.json()
632					.with_current_span(true)
633					.with_span_list(false)
634					.with_target(true)
635					.with_file(true)
636					.with_line_number(true)
637					.with_writer(non_blocking)
638					.with_ansi(false)
639					.with_span_events(FmtSpan::FULL);
640
641				registry.with(file_layer).init();
642			} else {
643				registry.init();
644			}
645		} else {
646			// Standard text output format
647			let fmt_layer = tracing_subscriber::fmt::layer()
648				.with_target(true)
649				.with_file(true)
650				.with_line_number(true)
651				.with_writer(std::io::stderr)
652				.with_ansi(true)
653				.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE);
654
655			let registry = tracing_subscriber::registry().with(filter).with(fmt_layer);
656
657			// Set up log file if specified
658			if let Some(ref log_path) = self.log_file_path {
659				let log_dir = std::path::Path::new(log_path).parent().unwrap_or(std::path::Path::new("."));
660
661				let log_file = std::path::Path::new(log_path)
662					.file_name()
663					.unwrap_or(std::ffi::OsStr::new("Air.log"));
664
665				let file_appender = tracing_appender::rolling::daily(log_dir, log_file);
666
667				let (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);
668
669				let file_layer = tracing_subscriber::fmt::layer()
670					.with_target(true)
671					.with_file(true)
672					.with_line_number(true)
673					.with_writer(non_blocking)
674					.with_ansi(false)
675					.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE);
676
677				registry.with(file_layer).init();
678			} else {
679				registry.init();
680			}
681		}
682
683		*initialized = true;
684		dev_log!("air", "[Logging] ContextLogger initialized - JSON output: {}", self.json_output);
685
686		Ok(())
687	}
688
689	/// Log with context at info level
690	pub fn Info(&self, message:impl Into<String>) {
691		let msg = self.sensitive_filter.Filter(&message.into());
692
693		if let Some(Context) = GetLogContext() {
694			dev_log!(
695				"air",
696				"[{}] req={} trace={} span={} {}",
697				Context.Operation,
698				Context.RequestId,
699				Context.TraceId,
700				Context.SpanId,
701				msg
702			);
703		} else {
704			dev_log!("air", "{}", msg);
705		}
706	}
707
708	/// Log with context at debug level
709	pub fn Debug(&self, message:impl Into<String>) {
710		let msg = self.sensitive_filter.Filter(&message.into());
711
712		if let Some(Context) = GetLogContext() {
713			dev_log!(
714				"air",
715				"[{}] req={} trace={} span={} {}",
716				Context.Operation,
717				Context.RequestId,
718				Context.TraceId,
719				Context.SpanId,
720				msg
721			);
722		} else {
723			dev_log!("air", "{}", msg);
724		}
725	}
726
727	/// Log with context at warn level
728	pub fn Warn(&self, message:impl Into<String>) {
729		let msg = self.sensitive_filter.Filter(&message.into());
730
731		if let Some(Context) = GetLogContext() {
732			dev_log!(
733				"air",
734				"warn: [{}] req={} trace={} span={} {}",
735				Context.Operation,
736				Context.RequestId,
737				Context.TraceId,
738				Context.SpanId,
739				msg
740			);
741		} else {
742			dev_log!("air", "warn: {}", msg);
743		}
744	}
745
746	/// Log with context at error level
747	pub fn Error(&self, message:impl Into<String>) {
748		let msg = self.sensitive_filter.Filter(&message.into());
749
750		if let Some(Context) = GetLogContext() {
751			dev_log!(
752				"air",
753				"error: [{}] req={} trace={} span={} {}",
754				Context.Operation,
755				Context.RequestId,
756				Context.TraceId,
757				Context.SpanId,
758				msg
759			);
760		} else {
761			dev_log!("air", "error: {}", msg);
762		}
763	}
764}
765
/// Global context logger instance
// Lazily populated by GetLogger() or the Initialize* functions below;
// OnceLock semantics mean the first successful set() wins for the process.
static LOGGER_INSTANCE:std::sync::OnceLock<ContextLogger> = std::sync::OnceLock::new();
768
769/// Get the global context logger
770pub fn GetLogger() -> &'static ContextLogger { LOGGER_INSTANCE.get_or_init(|| ContextLogger::New(false, None)) }
771
772/// Initialize the global context logger
773pub fn InitializeLogger(json_output:bool, log_file_path:Option<String>) -> Result<()> {
774	let logger = ContextLogger::New(json_output, log_file_path);
775
776	logger.Initialize()?;
777
778	let _old = LOGGER_INSTANCE.set(logger);
779
780	Ok(())
781}
782
783/// Initialize the global context logger with rotation
784pub fn InitializeLoggerWithRotation(
785	json_output:bool,
786
787	log_file_path:Option<String>,
788
789	rotation_config:LogRotationConfig,
790
791	sensitive_config:Option<SensitiveDataConfig>,
792) -> Result<()> {
793	let mut logger = ContextLogger::WithRotation(json_output, log_file_path, rotation_config)?;
794
795	if let Some(sensitive_config) = sensitive_config {
796		logger = logger.WithSensitiveFilter(sensitive_config)?;
797	}
798
799	logger.Initialize()?;
800
801	let _old = LOGGER_INSTANCE.set(logger);
802
803	Ok(())
804}