Add Debug trait implementations and fix warnings

Author: pandaadir05
Date:   2025-11-20 14:27:52 +02:00
Parent: 2b3d81cc03
Commit: 6329feabbd
6 changed files with 254 additions and 142 deletions
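
The headline change is deriving Debug on the detector structs so they can be formatted with {:?} in logs and dbg! calls. A minimal standalone sketch of what the derive enables (not part of the commit; the struct and its fields are placeholders):

// Placeholder struct standing in for AnomalyDetector, DetectionEngine, etc.
#[derive(Debug)]
struct Detector {
    outlier_threshold: f64,
    sample_count: usize,
}

fn main() {
    let detector = Detector { outlier_threshold: 3.0, sample_count: 42 };
    // Both lines compile only because Debug is implemented (here, derived).
    println!("{:?}", detector);
    dbg!(&detector);
}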

View File

@@ -1,4 +1,4 @@
use crate::{GhostError, ProcessInfo, Result};
use crate::{ProcessInfo, Result};
use chrono::Timelike;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -41,6 +41,7 @@ pub struct ProcessProfile {
}
/// Advanced ML-based anomaly detection for process behavior
#[derive(Debug)]
pub struct AnomalyDetector {
process_profiles: HashMap<String, ProcessProfile>,
global_baseline: Option<ProcessProfile>,
@@ -69,10 +70,13 @@ impl AnomalyDetector {
) -> ProcessFeatures {
let executable_regions = memory_regions
.iter()
.filter(|r| matches!(
r.protection,
crate::MemoryProtection::ReadExecute | crate::MemoryProtection::ReadWriteExecute
))
.filter(|r| {
matches!(
r.protection,
crate::MemoryProtection::ReadExecute
| crate::MemoryProtection::ReadWriteExecute
)
})
.count();
let rwx_regions = memory_regions
@@ -91,11 +95,7 @@ impl AnomalyDetector {
.count();
let total_memory_size: usize = memory_regions.iter().map(|r| r.size).sum();
let largest_region_size = memory_regions
.iter()
.map(|r| r.size)
.max()
.unwrap_or(0);
let largest_region_size = memory_regions.iter().map(|r| r.size).max().unwrap_or(0);
// Calculate memory fragmentation (std dev of region sizes)
let mean_size = if memory_regions.is_empty() {
@@ -117,10 +117,7 @@ impl AnomalyDetector {
// Thread-based features
let thread_creation_rate = if let Some(thread_list) = threads {
let recent_threads = thread_list
.iter()
.filter(|t| t.creation_time > 0)
.count();
let recent_threads = thread_list.iter().filter(|t| t.creation_time > 0).count();
recent_threads as f64 / thread_list.len().max(1) as f64
} else {
0.0
@@ -243,12 +240,12 @@ impl AnomalyDetector {
.iter()
.map(|(feature, score)| {
let weight = match feature.as_str() {
"rwx_regions" => 0.3, // High weight for RWX regions
"rwx_regions" => 0.3, // High weight for RWX regions
"thread_creation_rate" => 0.25, // High weight for thread anomalies
"entropy_score" => 0.2, // Medium weight for entropy
"api_call_frequency" => 0.15, // Medium weight for API calls
"memory_fragmentation" => 0.1, // Lower weight for fragmentation
_ => 0.05, // Low weight for other features
"entropy_score" => 0.2, // Medium weight for entropy
"api_call_frequency" => 0.15, // Medium weight for API calls
"memory_fragmentation" => 0.1, // Lower weight for fragmentation
_ => 0.05, // Low weight for other features
};
score * weight
})
@@ -259,8 +256,8 @@ impl AnomalyDetector {
// Calculate confidence based on sample size and feature coverage
let confidence = if let Some(profile) = baseline {
(profile.sample_count as f64 / 100.0).min(1.0) *
(component_scores.len() as f64 / 6.0).min(1.0)
(profile.sample_count as f64 / 100.0).min(1.0)
* (component_scores.len() as f64 / 6.0).min(1.0)
} else {
0.0
};
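
The scoring here is a plain weighted sum of per-feature anomaly scores, and the confidence term discounts thin baselines (full weight only once roughly 100 samples and all six features are available). A standalone sketch of the same arithmetic with made-up scores; the final reduction over the weighted values lies outside this hunk, so the sum below is an assumption:

use std::collections::HashMap;

fn main() {
    // Per-feature anomaly scores in [0, 1], e.g. from the z-score step below.
    let component_scores: HashMap<&str, f64> = HashMap::from([
        ("rwx_regions", 0.9),
        ("thread_creation_rate", 0.4),
        ("entropy_score", 0.2),
    ]);

    // Same weights as the match arms above; unlisted features fall back to 0.05.
    let weighted: f64 = component_scores
        .iter()
        .map(|(feature, score)| {
            let weight = match *feature {
                "rwx_regions" => 0.3,
                "thread_creation_rate" => 0.25,
                "entropy_score" => 0.2,
                "api_call_frequency" => 0.15,
                "memory_fragmentation" => 0.1,
                _ => 0.05,
            };
            score * weight
        })
        .sum();

    // Confidence: full weight only with ~100 baseline samples and all 6 features scored.
    let sample_count = 40_u64;
    let confidence =
        (sample_count as f64 / 100.0).min(1.0) * (component_scores.len() as f64 / 6.0).min(1.0);

    println!("weighted score = {weighted:.3}, confidence = {confidence:.3}");
}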
@@ -288,12 +285,12 @@ impl AnomalyDetector {
if std > 0.0 {
// Calculate z-score
let z_score = (value - mean).abs() / std;
// Convert z-score to anomaly score (0-1)
let anomaly_score = (z_score / 4.0).min(1.0); // Cap at 4 standard deviations
component_scores.insert(feature_name.to_string(), anomaly_score);
// Mark as outlier if beyond threshold
if z_score > self.outlier_threshold {
outlier_features.push(format!(
@@ -334,31 +331,52 @@ impl AnomalyDetector {
for (feature_name, value) in feature_values {
// Update mean
let old_mean = profile.feature_means.get(feature_name).copied().unwrap_or(0.0);
let old_mean = profile
.feature_means
.get(feature_name)
.copied()
.unwrap_or(0.0);
let new_mean = old_mean + (value - old_mean) / n;
profile.feature_means.insert(feature_name.to_string(), new_mean);
profile
.feature_means
.insert(feature_name.to_string(), new_mean);
// Update standard deviation (using variance)
if n > 1.0 {
let old_std = profile.feature_stds.get(feature_name).copied().unwrap_or(0.0);
let old_std = profile
.feature_stds
.get(feature_name)
.copied()
.unwrap_or(0.0);
let old_variance = old_std * old_std;
let new_variance = ((n - 2.0) * old_variance + (value - old_mean) * (value - new_mean)) / (n - 1.0);
let new_variance = ((n - 2.0) * old_variance
+ (value - old_mean) * (value - new_mean))
/ (n - 1.0);
let new_std = new_variance.max(0.0).sqrt();
profile.feature_stds.insert(feature_name.to_string(), new_std);
profile
.feature_stds
.insert(feature_name.to_string(), new_std);
}
}
profile.last_updated = chrono::Utc::now();
}
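
The per-feature update is the standard one-pass (Welford-style) recurrence: the mean moves by (value - old_mean) / n, and the sample variance is carried forward as ((n - 2) * old_var + (value - old_mean) * (value - new_mean)) / (n - 1), so no history needs to be stored. A standalone check of the recurrence against a direct two-pass computation:

fn main() {
    let samples = [2.0_f64, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0];

    // Incremental mean / sample std, mirroring the per-feature update above.
    let (mut mean, mut std_dev, mut n) = (0.0_f64, 0.0_f64, 0.0_f64);
    for &value in &samples {
        n += 1.0;
        let old_mean = mean;
        mean = old_mean + (value - old_mean) / n;
        if n > 1.0 {
            let old_var = std_dev * std_dev;
            let new_var =
                ((n - 2.0) * old_var + (value - old_mean) * (value - mean)) / (n - 1.0);
            std_dev = new_var.max(0.0).sqrt();
        }
    }

    // Direct two-pass sample standard deviation for comparison.
    let m = samples.iter().sum::<f64>() / samples.len() as f64;
    let var =
        samples.iter().map(|x| (x - m).powi(2)).sum::<f64>() / (samples.len() as f64 - 1.0);
    println!("incremental: mean={mean:.3} std={std_dev:.3}");
    println!("two-pass:    mean={m:.3} std={:.3}", var.sqrt());
    // The z-score used earlier is then |value - mean| / std, capped at 4 sigma.
}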
fn estimate_api_call_frequency(&self, _process: &ProcessInfo, memory_regions: &[crate::MemoryRegion]) -> f64 {
fn estimate_api_call_frequency(
&self,
_process: &ProcessInfo,
memory_regions: &[crate::MemoryRegion],
) -> f64 {
// Heuristic: More executable regions might indicate more API calls
let executable_count = memory_regions
.iter()
.filter(|r| matches!(
r.protection,
crate::MemoryProtection::ReadExecute | crate::MemoryProtection::ReadWriteExecute
))
.filter(|r| {
matches!(
r.protection,
crate::MemoryProtection::ReadExecute
| crate::MemoryProtection::ReadWriteExecute
)
})
.count();
(executable_count as f64 / memory_regions.len().max(1) as f64) * 100.0
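
The heuristic reduces to a percentage: 3 executable regions out of 12 mapped regions gives (3 / 12) * 100 = 25.0, and the .max(1) keeps the division safe when no regions were enumerated.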
@@ -407,4 +425,4 @@ impl Default for AnomalyDetector {
fn default() -> Self {
Self::new()
}
}
}

View File

@@ -1,7 +1,7 @@
use crate::{ProcessInfo, MemoryRegion, ThreadInfo, GhostError};
use crate::{GhostError, MemoryRegion, ProcessInfo, ThreadInfo};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::{SystemTime, Duration};
use std::time::{Duration, SystemTime};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdvancedBehavioralML {
@@ -169,16 +169,16 @@ impl AdvancedBehavioralML {
) -> Result<BehavioralAnalysisResult, GhostError> {
// Extract behavioral features
let features = self.extract_features(process, memory_regions, threads)?;
// Run ensemble prediction
let threat_probability = self.predict_threat(&features).await?;
// Detect anomalies
let anomalies = self.detect_anomalies(&features)?;
// Predict techniques
let predicted_techniques = self.predict_techniques(&features)?;
Ok(BehavioralAnalysisResult {
threat_probability,
predicted_techniques,
@@ -204,21 +204,26 @@ impl AdvancedBehavioralML {
_threads: &[ThreadInfo],
) -> Result<Vec<f32>, GhostError> {
let mut features = Vec::new();
// Basic process features
features.push(process.pid as f32);
features.push(memory_regions.len() as f32);
// Memory protection features
let rwx_count = memory_regions.iter()
.filter(|r| r.protection.is_readable() && r.protection.is_writable() && r.protection.is_executable())
let rwx_count = memory_regions
.iter()
.filter(|r| {
r.protection.is_readable()
&& r.protection.is_writable()
&& r.protection.is_executable()
})
.count() as f32;
features.push(rwx_count);
// Size distribution
let total_size: u64 = memory_regions.iter().map(|r| r.size as u64).sum();
features.push(total_size as f32);
Ok(features)
}
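
The feature vector built here is just four f32 values: pid, region count, RWX region count, and total mapped size. A standalone mirror of that construction with a simplified region type (the real code uses crate::MemoryRegion and its MemoryProtection accessors):

// Simplified stand-in for crate::MemoryRegion, enough to mirror the filter above.
struct Region {
    size: usize,
    readable: bool,
    writable: bool,
    executable: bool,
}

fn extract_features(pid: u32, regions: &[Region]) -> Vec<f32> {
    let rwx_count = regions
        .iter()
        .filter(|r| r.readable && r.writable && r.executable)
        .count() as f32;
    let total_size: u64 = regions.iter().map(|r| r.size as u64).sum();
    vec![pid as f32, regions.len() as f32, rwx_count, total_size as f32]
}

fn main() {
    let regions = vec![
        Region { size: 0x1000, readable: true, writable: false, executable: true },
        Region { size: 0x4000, readable: true, writable: true, executable: true }, // RWX
    ];
    println!("{:?}", extract_features(4242, &regions));
}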
@@ -246,4 +251,4 @@ impl AdvancedBehavioralML {
pub fn get_statistics(&self) -> HashMap<String, ModelPerformance> {
self.statistics.model_performance.clone()
}
}
}

View File

@@ -4,18 +4,19 @@
//! analysis techniques including memory scanning, shellcode detection,
//! process hollowing detection, and behavioral anomaly analysis.
use crate::{
detect_hook_injection, AnomalyDetector, DetectionConfig, EvasionDetector, EvasionResult,
GhostError, HollowingDetector, MemoryProtection, MemoryRegion, MitreAnalysisResult, MitreAttackEngine,
ProcessInfo, ShellcodeDetector, ThreadInfo, ThreatContext, ThreatIntelligence,
};
#[cfg(target_os = "linux")]
use crate::EbpfDetector;
use crate::{
detect_hook_injection, AnomalyDetector, DetectionConfig, EvasionDetector, EvasionResult,
GhostError, HollowingDetector, MemoryProtection, MemoryRegion, MitreAnalysisResult,
MitreAttackEngine, ProcessInfo, ShellcodeDetector, ThreadInfo, ThreatContext,
ThreatIntelligence,
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Threat classification levels for detected processes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Hash)]
pub enum ThreatLevel {
/// Process appears normal with no suspicious indicators.
Clean,
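
The newly derived PartialOrd and Ord make threat levels comparable in declaration order, which is convenient for escalating to the more severe of two assessments. A minimal sketch of the idea, assuming the remaining variants (Suspicious and Malicious, as used elsewhere in this diff) follow Clean in ascending severity:

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum ThreatLevel {
    Clean,
    Suspicious,
    Malicious,
}

fn main() {
    // Derived Ord follows declaration order, so severity comparisons just work.
    assert!(ThreatLevel::Clean < ThreatLevel::Suspicious);
    let escalated = ThreatLevel::Suspicious.max(ThreatLevel::Malicious);
    assert_eq!(escalated, ThreatLevel::Malicious);
    println!("most severe: {:?}", escalated);
}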
@@ -56,6 +57,7 @@ pub struct DetectionResult {
}
/// Main detection engine that orchestrates all analysis components.
#[derive(Debug)]
pub struct DetectionEngine {
baseline: HashMap<u32, ProcessBaseline>,
shellcode_detector: ShellcodeDetector,
@@ -180,22 +182,22 @@ impl DetectionEngine {
indicators.push(format!("{} new threads created", diff));
confidence += 0.2;
}
// Detect significant RWX increase (possible injection)
if rwx_count > baseline.rwx_regions + 1 {
indicators.push("Rapid RWX region allocation".to_string());
confidence += 0.5;
}
}
// Check for unusual memory patterns
self.check_memory_patterns(memory_regions, &mut indicators, &mut confidence);
// Analyze threads if provided
if let Some(thread_list) = threads {
self.analyze_threads(thread_list, &mut indicators, &mut confidence);
}
// Check for Windows hook injection
if let Ok(hook_result) = detect_hook_injection(process.pid) {
if hook_result.suspicious_count > 0 {
@@ -205,13 +207,13 @@ impl DetectionEngine {
));
confidence += 0.6; // High confidence for hook-based injection
}
if hook_result.global_hooks > 8 {
indicators.push("Excessive global hooks (possible system compromise)".to_string());
confidence += 0.3;
}
}
// Scan for shellcode patterns in executable memory regions
let shellcode_detections = self.scan_for_shellcode(memory_regions);
if !shellcode_detections.is_empty() {
@@ -224,34 +226,41 @@ impl DetectionEngine {
confidence += detection.confidence;
}
}
// Check for process hollowing
if let Ok(Some(hollowing_detection)) = self.hollowing_detector.analyze_process(process, memory_regions) {
if let Ok(Some(hollowing_detection)) = self
.hollowing_detector
.analyze_process(process, memory_regions)
{
for indicator in &hollowing_detection.indicators {
indicators.push(format!("Process hollowing: {}", indicator));
}
confidence += hollowing_detection.confidence;
}
// ML-based anomaly detection
let features = self.anomaly_detector.extract_features(process, memory_regions, threads);
let features = self
.anomaly_detector
.extract_features(process, memory_regions, threads);
if let Ok(anomaly_score) = self.anomaly_detector.analyze_anomaly(process, &features) {
if self.anomaly_detector.is_anomalous(&anomaly_score) {
indicators.push(format!(
"ML anomaly detected: {:.1}% confidence",
anomaly_score.overall_score * 100.0
));
for outlier in &anomaly_score.outlier_features {
indicators.push(format!("Outlier: {}", outlier));
}
confidence += (anomaly_score.overall_score * anomaly_score.confidence) as f32;
}
}
// Advanced evasion detection
let evasion_result = self.evasion_detector.analyze_evasion(process, memory_regions, threads.unwrap_or(&[]));
let evasion_result =
self.evasion_detector
.analyze_evasion(process, memory_regions, threads.unwrap_or(&[]));
if evasion_result.confidence > 0.3 {
for technique in &evasion_result.evasion_techniques {
indicators.push(format!(
@@ -260,14 +269,14 @@ impl DetectionEngine {
technique.confidence * 100.0
));
}
for indicator in &evasion_result.anti_analysis_indicators {
indicators.push(format!("Anti-analysis: {}", indicator));
}
// Increase confidence based on evasion sophistication
confidence += evasion_result.confidence * 0.4;
// Boost threat level for sophisticated evasion
if evasion_result.sophistication_score > 0.7 {
confidence += 0.2; // Additional boost for advanced evasion
@@ -291,7 +300,10 @@ impl DetectionEngine {
};
// Create initial detection result
let mut detection_result = DetectionResult {
// Enrich with threat intelligence (async operation would be handled by caller)
// For now, we'll set a placeholder that can be enriched later
DetectionResult {
process: process.clone(),
threat_level,
indicators,
@@ -299,17 +311,16 @@ impl DetectionEngine {
threat_context: None,
evasion_analysis: None,
mitre_analysis: None,
};
// Enrich with threat intelligence (async operation would be handled by caller)
// For now, we'll set a placeholder that can be enriched later
detection_result
}
}
/// Enrich detection result with threat intelligence
pub async fn enrich_with_threat_intel(&self, mut detection: DetectionResult) -> DetectionResult {
pub async fn enrich_with_threat_intel(
&self,
mut detection: DetectionResult,
) -> DetectionResult {
let threat_context = self.threat_intelligence.enrich_detection(&detection).await;
// Update threat level based on threat intelligence findings
if threat_context.risk_score > 0.8 {
detection.threat_level = ThreatLevel::Malicious;
@@ -321,11 +332,15 @@ impl DetectionEngine {
// Add threat intelligence indicators
for ioc in &threat_context.matched_iocs {
detection.indicators.push(format!("IOC Match: {} ({})", ioc.value, ioc.source));
detection
.indicators
.push(format!("IOC Match: {} ({})", ioc.value, ioc.source));
}
if let Some(actor) = &threat_context.threat_actor {
detection.indicators.push(format!("Attributed to: {}", actor.name));
detection
.indicators
.push(format!("Attributed to: {}", actor.name));
}
detection.threat_context = Some(threat_context);
@@ -341,19 +356,23 @@ impl DetectionEngine {
) -> DetectionResult {
// Perform standard detection
let mut detection_result = self.analyze_process(process, memory_regions, Some(threads));
// Add evasion analysis
let evasion_result = self.evasion_detector.analyze_evasion(process, memory_regions, threads);
let evasion_result =
self.evasion_detector
.analyze_evasion(process, memory_regions, threads);
// Update threat level based on evasion analysis
if evasion_result.confidence > 0.7 {
detection_result.threat_level = ThreatLevel::Malicious;
detection_result.confidence = (detection_result.confidence + evasion_result.confidence) / 2.0;
detection_result.confidence =
(detection_result.confidence + evasion_result.confidence) / 2.0;
} else if evasion_result.confidence > 0.4 {
detection_result.threat_level = ThreatLevel::Suspicious;
detection_result.confidence = (detection_result.confidence + evasion_result.confidence * 0.7) / 2.0;
detection_result.confidence =
(detection_result.confidence + evasion_result.confidence * 0.7) / 2.0;
}
detection_result.evasion_analysis = Some(evasion_result);
detection_result
}
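
The blending above is a plain average: with a base confidence of 0.6 and evasion confidence 0.8 (above 0.7), the level is forced to Malicious and the confidence becomes (0.6 + 0.8) / 2 = 0.7; with evasion confidence 0.5 (the 0.4..0.7 band), the level becomes Suspicious and the evasion term is first discounted to 70 percent, giving (0.6 + 0.5 * 0.7) / 2 = 0.475.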
@@ -365,7 +384,7 @@ impl DetectionEngine {
match ebpf_detector.process_events() {
Ok(ebpf_events) => {
let mut detection_results = Vec::new();
for ebpf_event in ebpf_events {
// Convert eBPF detection event to standard DetectionResult
let detection_result = DetectionResult {
@@ -382,10 +401,10 @@ impl DetectionEngine {
threat_context: None,
evasion_analysis: None,
};
detection_results.push(detection_result);
}
Ok(detection_results)
}
Err(e) => {
@@ -401,7 +420,9 @@ impl DetectionEngine {
/// Get eBPF detector statistics (Linux only)
#[cfg(target_os = "linux")]
pub fn get_ebpf_statistics(&self) -> Option<crate::ebpf::EbpfStatistics> {
self.ebpf_detector.as_ref().map(|detector| detector.get_statistics())
self.ebpf_detector
.as_ref()
.map(|detector| detector.get_statistics())
}
/// Check for suspicious memory patterns
@@ -429,7 +450,7 @@ impl DetectionEngine {
// Check for memory gaps that might indicate hollowing
let mut sorted_regions: Vec<_> = regions.iter().collect();
sorted_regions.sort_by_key(|r| r.base_address);
for window in sorted_regions.windows(2) {
let gap = window[1].base_address - (window[0].base_address + window[0].size);
if gap > 0x100000 && gap < 0x1000000 {
@@ -466,10 +487,7 @@ impl DetectionEngine {
}
// Check for abnormal thread creation time patterns
let recent_threads = threads
.iter()
.filter(|t| t.creation_time > 0)
.count();
let recent_threads = threads.iter().filter(|t| t.creation_time > 0).count();
if recent_threads as f32 / threads.len() as f32 > 0.5 {
indicators.push("High ratio of recently created threads".to_string());
@@ -536,7 +554,9 @@ impl DetectionEngine {
memory_regions: &[MemoryRegion],
threads: &[ThreadInfo],
) -> Result<MitreAnalysisResult, GhostError> {
self.mitre_engine.analyze_attack_patterns(process, memory_regions, threads).await
self.mitre_engine
.analyze_attack_patterns(process, memory_regions, threads)
.await
}
/// Enrich detection result with MITRE ATT&CK analysis
@@ -546,14 +566,18 @@ impl DetectionEngine {
memory_regions: &[MemoryRegion],
threads: &[ThreadInfo],
) -> DetectionResult {
if let Ok(mitre_analysis) = self.mitre_engine.analyze_attack_patterns(&detection.process, memory_regions, threads).await {
if let Ok(mitre_analysis) = self
.mitre_engine
.analyze_attack_patterns(&detection.process, memory_regions, threads)
.await
{
// Update threat level based on MITRE analysis
if mitre_analysis.risk_assessment.overall_risk_score > 0.8 {
detection.threat_level = ThreatLevel::Malicious;
} else if mitre_analysis.risk_assessment.overall_risk_score > 0.5 {
if detection.threat_level == ThreatLevel::Clean {
detection.threat_level = ThreatLevel::Suspicious;
}
} else if mitre_analysis.risk_assessment.overall_risk_score > 0.5
&& detection.threat_level == ThreatLevel::Clean
{
detection.threat_level = ThreatLevel::Suspicious;
}
// Add MITRE technique indicators
@@ -576,7 +600,8 @@ impl DetectionEngine {
}
// Update confidence with MITRE insights
detection.confidence = (detection.confidence + mitre_analysis.risk_assessment.overall_risk_score) / 2.0;
detection.confidence =
(detection.confidence + mitre_analysis.risk_assessment.overall_risk_score) / 2.0;
detection.mitre_analysis = Some(mitre_analysis);
}

View File

@@ -1,7 +1,14 @@
// eBPF module - currently stub implementation for Linux
// Most functionality not yet implemented
#[cfg(target_os = "linux")]
use crate::ProcessInfo;
#[cfg(target_os = "linux")]
use std::collections::HashMap;
#[cfg(target_os = "linux")]
use std::sync::{Arc, Mutex};
use std::time::{SystemTime, Duration};
use crate::{ProcessInfo, MemoryRegion, DetectionResult, ThreatLevel};
#[cfg(target_os = "linux")]
use std::time::{Duration, SystemTime};
/// Linux eBPF-based Process Injection Detection
/// Provides kernel-level tracing and detection capabilities on Linux systems
@@ -444,7 +451,10 @@ pub enum FilterCondition {
ProcessName(String),
ProcessId(u32),
UserId(u32),
EventFrequency { max_events: u32, time_window: Duration },
EventFrequency {
max_events: u32,
time_window: Duration,
},
MemoryThreshold(u64),
FilePattern(String),
NetworkDestination(String),
@@ -504,7 +514,7 @@ impl EbpfDetector {
// Set up event processing
self.setup_event_handlers()?;
// Configure default filters
self.setup_default_filters()?;
@@ -625,10 +635,8 @@ impl EbpfDetector {
Box::new(ProcessCreateHandler::new()),
);
self.event_processor.register_handler(
EventType::MemoryMap,
Box::new(MemoryMapHandler::new()),
);
self.event_processor
.register_handler(EventType::MemoryMap, Box::new(MemoryMapHandler::new()));
self.event_processor.register_handler(
EventType::MemoryProtect,
@@ -684,7 +692,7 @@ impl EbpfDetector {
/// Process events from the ring buffer
pub fn process_events(&mut self) -> Result<Vec<DetectionEvent>, EbpfError> {
let mut detection_events = Vec::new();
let events = {
let mut buffer = self.ring_buffer.lock().unwrap();
buffer.drain_events()
@@ -746,8 +754,8 @@ impl EbpfDetector {
loaded_programs: self.program_manager.loaded_programs.len(),
total_events_processed: 0, // Would be tracked in real implementation
detections_generated: 0, // Would be tracked in real implementation
filter_efficiency: 0.0, // Would be calculated in real implementation
performance_impact: 0.0, // Would be measured in real implementation
filter_efficiency: 0.0, // Would be calculated in real implementation
performance_impact: 0.0, // Would be measured in real implementation
}
}
}
@@ -966,4 +974,4 @@ impl EbpfDetector {
pub fn initialize(&mut self) -> Result<(), &'static str> {
Err("eBPF detection is only supported on Linux")
}
}
}

View File

@@ -81,4 +81,4 @@ impl From<toml::de::Error> for GhostError {
}
/// Type alias for Result with GhostError as the error type.
pub type Result<T> = std::result::Result<T, GhostError>;
pub type Result<T> = std::result::Result<T, GhostError>;

View File

@@ -1,10 +1,11 @@
use std::collections::HashMap;
use std::time::{SystemTime, Duration};
use crate::{MemoryProtection, MemoryRegion, ProcessInfo, ThreadInfo};
use serde::{Deserialize, Serialize};
use crate::{ProcessInfo, MemoryRegion, ThreadInfo, MemoryProtection};
use std::collections::HashMap;
use std::time::{Duration, SystemTime};
/// Advanced Evasion Detection Module
/// Detects sophisticated anti-analysis and evasion techniques
#[derive(Debug)]
pub struct EvasionDetector {
timing_analyzer: TimingAnalyzer,
environment_checker: EnvironmentChecker,
@@ -32,13 +33,14 @@ pub struct EvasionTechnique {
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum EvasionSeverity {
Low, // Basic evasion attempts
Medium, // Moderate sophistication
High, // Advanced techniques
Critical, // Nation-state level evasion
Low, // Basic evasion attempts
Medium, // Moderate sophistication
High, // Advanced techniques
Critical, // Nation-state level evasion
}
/// Timing-based evasion detection
#[derive(Debug)]
pub struct TimingAnalyzer {
execution_timings: HashMap<u32, Vec<ExecutionTiming>>,
sleep_patterns: HashMap<u32, Vec<SleepPattern>>,
@@ -76,6 +78,7 @@ pub enum SleepContext {
}
/// Environment-based evasion detection
#[derive(Debug)]
pub struct EnvironmentChecker {
vm_indicators: Vec<VmIndicator>,
debugger_checks: Vec<DebuggerCheck>,
@@ -91,11 +94,11 @@ pub struct VmIndicator {
#[derive(Debug, Clone)]
pub enum VmIndicatorType {
ProcessName, // VM-related processes
RegistryKey, // VM registry artifacts
FilePath, // VM file system artifacts
HardwareId, // VM hardware identifiers
Timing, // VM timing anomalies
ProcessName, // VM-related processes
RegistryKey, // VM registry artifacts
FilePath, // VM file system artifacts
HardwareId, // VM hardware identifiers
Timing, // VM timing anomalies
}
#[derive(Debug, Clone)]
@@ -117,10 +120,10 @@ pub enum DebuggerCheckType {
#[derive(Debug, Clone)]
pub enum BypassDifficulty {
Trivial, // Easy to bypass
Moderate, // Requires knowledge
Difficult, // Advanced techniques needed
Expert, // Very sophisticated bypass required
Trivial, // Easy to bypass
Moderate, // Requires knowledge
Difficult, // Advanced techniques needed
Expert, // Very sophisticated bypass required
}
#[derive(Debug, Clone)]
@@ -131,6 +134,7 @@ pub struct SandboxSignature {
}
/// Behavioral analysis for evasion detection
#[derive(Debug)]
pub struct BehaviorAnalyzer {
api_hooking_detector: ApiHookingDetector,
execution_flow_analyzer: ExecutionFlowAnalyzer,
@@ -572,6 +576,7 @@ pub enum CleanupMethod {
}
/// Code obfuscation and packing detection
#[derive(Debug)]
pub struct ObfuscationDetector {
packer_signatures: Vec<PackerSignature>,
obfuscation_patterns: Vec<ObfuscationPattern>,
@@ -647,6 +652,12 @@ pub enum KeyDerivation {
UserInput,
}
impl Default for EvasionDetector {
fn default() -> Self {
Self::new()
}
}
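
The Default impls added throughout this file all follow the same shape, delegating to new(); this likely addresses clippy's new_without_default warning and lets the detectors be built via Default::default() or any API that requires T: Default. A standalone sketch of the pattern (the field is a placeholder; the real structs carry more state):

#[derive(Debug)]
pub struct TimingAnalyzer {
    window_ms: u64, // placeholder field
}

impl TimingAnalyzer {
    pub fn new() -> Self {
        Self { window_ms: 500 }
    }
}

// The pattern added in this commit: Default simply delegates to new().
impl Default for TimingAnalyzer {
    fn default() -> Self {
        Self::new()
    }
}

fn main() {
    let analyzer: TimingAnalyzer = Default::default();
    println!("{:?}", analyzer);
}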
impl EvasionDetector {
pub fn new() -> Self {
Self {
@@ -670,7 +681,9 @@ impl EvasionDetector {
let mut anti_analysis_indicators = Vec::new();
// Timing-based evasion analysis
let timing_result = self.timing_analyzer.analyze_timing_evasion(process, threads);
let timing_result = self
.timing_analyzer
.analyze_timing_evasion(process, threads);
if !timing_result.techniques.is_empty() {
evasion_techniques.extend(timing_result.techniques);
confidence += timing_result.confidence * 0.3;
@@ -686,9 +699,9 @@ impl EvasionDetector {
}
// Behavioral analysis
let behavior_result = self.behavior_analyzer.analyze_behavior_evasion(
process, memory_regions, threads
);
let behavior_result =
self.behavior_analyzer
.analyze_behavior_evasion(process, memory_regions, threads);
if !behavior_result.techniques.is_empty() {
evasion_techniques.extend(behavior_result.techniques);
confidence += behavior_result.confidence * 0.25;
@@ -696,9 +709,9 @@ impl EvasionDetector {
}
// Obfuscation analysis
let obfuscation_result = self.obfuscation_detector.detect_obfuscation(
process, memory_regions
);
let obfuscation_result = self
.obfuscation_detector
.detect_obfuscation(process, memory_regions);
if !obfuscation_result.techniques.is_empty() {
evasion_techniques.extend(obfuscation_result.techniques);
confidence += obfuscation_result.confidence * 0.15;
@@ -724,6 +737,12 @@ impl EvasionDetector {
}
}
impl Default for TimingAnalyzer {
fn default() -> Self {
Self::new()
}
}
impl TimingAnalyzer {
pub fn new() -> Self {
Self {
@@ -810,6 +829,12 @@ struct TimingEvasionResult {
indicators: Vec<String>,
}
impl Default for EnvironmentChecker {
fn default() -> Self {
Self::new()
}
}
impl EnvironmentChecker {
pub fn new() -> Self {
Self {
@@ -910,6 +935,12 @@ struct EnvironmentEvasionResult {
indicators: Vec<String>,
}
impl Default for BehaviorAnalyzer {
fn default() -> Self {
Self::new()
}
}
impl BehaviorAnalyzer {
pub fn new() -> Self {
Self {
@@ -939,9 +970,10 @@ impl BehaviorAnalyzer {
}
// Execution flow analysis
if let Some(flow_evasion) = self.execution_flow_analyzer.analyze_execution_flow(
process, memory_regions
) {
if let Some(flow_evasion) = self
.execution_flow_analyzer
.analyze_execution_flow(process, memory_regions)
{
techniques.push(flow_evasion);
confidence += 0.5;
sophistication += 0.8;
@@ -965,6 +997,12 @@ struct BehaviorEvasionResult {
indicators: Vec<String>,
}
impl Default for ApiHookingDetector {
fn default() -> Self {
Self::new()
}
}
impl ApiHookingDetector {
pub fn new() -> Self {
Self {
@@ -990,6 +1028,12 @@ impl ApiHookingDetector {
}
}
impl Default for ExecutionFlowAnalyzer {
fn default() -> Self {
Self::new()
}
}
impl ExecutionFlowAnalyzer {
pub fn new() -> Self {
Self {
@@ -1031,6 +1075,12 @@ impl ExecutionFlowAnalyzer {
}
}
impl Default for ResourceUsageMonitor {
fn default() -> Self {
Self::new()
}
}
impl ResourceUsageMonitor {
pub fn new() -> Self {
Self {
@@ -1064,6 +1114,12 @@ impl ResourceUsageMonitor {
}
}
impl Default for ObfuscationDetector {
fn default() -> Self {
Self::new()
}
}
impl ObfuscationDetector {
pub fn new() -> Self {
Self {
@@ -1152,4 +1208,4 @@ struct ObfuscationEvasionResult {
confidence: f32,
sophistication: f32,
indicators: Vec<String>,
}
}