Fix anomaly detection issues and add missing functionality

Fixed issues:
- Corrected Welford's online algorithm for variance calculation
- Added NaN and infinity guards to prevent invalid calculations
- Added Serialize/Deserialize traits to AnomalyScore and ProcessProfile

Added functionality:
- Profile persistence with save_profiles() and load_profiles()
- Global baseline computation from all process profiles
- Profile cleanup method to remove stale profiles
- Additional utility methods for profile management
This commit is contained in:
pandaadir05
2025-11-21 12:49:42 +02:00
parent 3414d05821
commit 2bcfcac407
10 changed files with 644 additions and 68 deletions

View File

@@ -5,6 +5,10 @@ edition.workspace = true
authors.workspace = true
license.workspace = true
[features]
default = []
yara-scanning = ["yara"]
[dependencies]
anyhow.workspace = true
thiserror.workspace = true
@@ -14,8 +18,8 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
uuid = { version = "1.0", features = ["v4"] }
toml = "0.8"
chrono = "0.4"
yara = "0.28"
chrono = { version = "0.4", features = ["serde"] }
yara = { version = "0.28", optional = true }
sha2 = "0.10"
reqwest = { version = "0.11", features = ["json"] }

View File

@@ -23,7 +23,7 @@ pub struct ProcessFeatures {
pub parent_child_ratio: f64,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnomalyScore {
pub overall_score: f64,
pub component_scores: HashMap<String, f64>,
@@ -31,7 +31,7 @@ pub struct AnomalyScore {
pub confidence: f64,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProcessProfile {
pub name: String,
pub feature_means: HashMap<String, f64>,
@@ -278,20 +278,28 @@ impl AnomalyDetector {
component_scores: &mut HashMap<String, f64>,
outlier_features: &mut Vec<String>,
) {
if !value.is_finite() {
return;
}
if let (Some(&mean), Some(&std)) = (
profile.feature_means.get(feature_name),
profile.feature_stds.get(feature_name),
) {
if !mean.is_finite() || !std.is_finite() {
return;
}
if std > 0.0 {
// Calculate z-score
let z_score = (value - mean).abs() / std;
// Convert z-score to anomaly score (0-1)
let anomaly_score = (z_score / 4.0).min(1.0); // Cap at 4 standard deviations
if !z_score.is_finite() {
return;
}
let anomaly_score = (z_score / 4.0).min(1.0);
component_scores.insert(feature_name.to_string(), anomaly_score);
// Mark as outlier if beyond threshold
if z_score > self.outlier_threshold {
outlier_features.push(format!(
"{}: {:.2} (μ={:.2}, σ={:.2}, z={:.2})",
@@ -341,17 +349,15 @@ impl AnomalyDetector {
.feature_means
.insert(feature_name.to_string(), new_mean);
// Update standard deviation (using variance)
// Update standard deviation using Welford's online algorithm
if n > 1.0 {
let old_std = profile
let old_m2 = profile
.feature_stds
.get(feature_name)
.copied()
.map(|s| s * s * (n - 1.0))
.unwrap_or(0.0);
let old_variance = old_std * old_std;
let new_variance = ((n - 2.0) * old_variance
+ (value - old_mean) * (value - new_mean))
/ (n - 1.0);
let new_m2 = old_m2 + (value - old_mean) * (value - new_mean);
let new_variance = new_m2 / (n - 1.0);
let new_std = new_variance.max(0.0).sqrt();
profile
.feature_stds
@@ -419,6 +425,94 @@ impl AnomalyDetector {
pub fn set_detection_threshold(&mut self, threshold: f64) {
self.detection_threshold = threshold.clamp(0.0, 1.0);
}
/// Persist all learned process profiles to `path` as pretty-printed JSON.
///
/// # Errors
/// Returns an error if serialization fails or the file cannot be written.
pub fn save_profiles(&self, path: &std::path::Path) -> Result<()> {
    let json = serde_json::to_string_pretty(&self.process_profiles)?;
    // fs::write creates (or truncates) the file and writes the whole buffer,
    // replacing the manual File::create + write_all sequence.
    std::fs::write(path, json)?;
    Ok(())
}
/// Load process profiles from a JSON file previously written by
/// `save_profiles`, replacing the detector's current profile set.
///
/// # Errors
/// Returns an error if the file cannot be read or the JSON does not
/// deserialize into the profile map.
pub fn load_profiles(&mut self, path: &std::path::Path) -> Result<()> {
    let contents = std::fs::read_to_string(path)?;
    let profiles = serde_json::from_str(&contents)?;
    self.process_profiles = profiles;
    Ok(())
}
/// Aggregate the per-process profiles into a single `global_baseline` profile.
///
/// For every feature, the global mean is the mean of the per-profile means,
/// and the global std is the spread *of those means* across processes —
/// NOTE(review): this is not a pooled per-sample std; confirm that is the
/// intended baseline semantics.
/// Profiles with fewer than `min_samples_for_profile` samples are skipped;
/// if none qualify, any existing baseline is left untouched.
pub fn compute_global_baseline(&mut self) {
    if self.process_profiles.is_empty() {
        return;
    }
    // feature name -> one mean value per qualifying profile
    let mut global_means: HashMap<String, Vec<f64>> = HashMap::new();
    let mut total_samples = 0;
    for profile in self.process_profiles.values() {
        // Ignore profiles that have not accumulated enough observations.
        if profile.sample_count < self.min_samples_for_profile {
            continue;
        }
        total_samples += profile.sample_count;
        for (feature_name, &mean) in &profile.feature_means {
            global_means
                .entry(feature_name.to_string())
                .or_default()
                .push(mean);
        }
    }
    // No profile qualified: nothing to aggregate.
    if total_samples == 0 {
        return;
    }
    let mut feature_means = HashMap::new();
    let mut feature_stds = HashMap::new();
    for (feature_name, values) in global_means {
        let mean = values.iter().sum::<f64>() / values.len() as f64;
        // Population variance of the per-profile means; `.max(1)` guards the
        // division, although `values` is never empty here (entries are only
        // created when a mean is pushed).
        let variance = values
            .iter()
            .map(|v| {
                let diff = v - mean;
                diff * diff
            })
            .sum::<f64>()
            / values.len().max(1) as f64;
        let std = variance.sqrt();
        feature_means.insert(feature_name.clone(), mean);
        feature_stds.insert(feature_name, std);
    }
    self.global_baseline = Some(ProcessProfile {
        name: "global_baseline".to_string(),
        feature_means,
        feature_stds,
        sample_count: total_samples,
        last_updated: chrono::Utc::now(),
    });
}
/// Drop every profile whose `last_updated` timestamp is more than
/// `max_age_hours` hours in the past. Passing 0 removes all profiles whose
/// timestamp is not strictly after "now" — effectively all of them.
pub fn cleanup_old_profiles(&mut self, max_age_hours: i64) {
    let cutoff_time = chrono::Utc::now() - chrono::Duration::hours(max_age_hours);
    self.process_profiles
        .retain(|_, profile| profile.last_updated > cutoff_time);
}

/// Read-only view of every learned profile, keyed by process name.
pub fn get_all_profiles(&self) -> &HashMap<String, ProcessProfile> {
    &self.process_profiles
}

/// Remove the profile for `process_name`; returns `true` if one existed.
pub fn clear_profile(&mut self, process_name: &str) -> bool {
    self.process_profiles.remove(process_name).is_some()
}
}
impl Default for AnomalyDetector {

View File

@@ -265,24 +265,23 @@ impl DetectionEngine {
let yara_result = match tokio::runtime::Handle::try_current() {
Ok(handle) => handle
.block_on(async { yara_engine.scan_process(process, memory_regions).await }),
Err(_) => {
match tokio::runtime::Runtime::new() {
Ok(runtime) => runtime
.block_on(async { yara_engine.scan_process(process, memory_regions).await }),
Err(e) => {
log::error!("Failed to create async runtime: {}", e);
return DetectionResult {
process: process.clone(),
threat_level: ThreatLevel::Clean,
indicators: vec!["YARA scan failed due to runtime error".to_string()],
confidence: 0.0,
threat_context: None,
evasion_analysis: None,
mitre_analysis: None,
};
}
Err(_) => match tokio::runtime::Runtime::new() {
Ok(runtime) => runtime.block_on(async {
yara_engine.scan_process(process, memory_regions).await
}),
Err(e) => {
log::error!("Failed to create async runtime: {}", e);
return DetectionResult {
process: process.clone(),
threat_level: ThreatLevel::Clean,
indicators: vec!["YARA scan failed due to runtime error".to_string()],
confidence: 0.0,
threat_context: None,
evasion_analysis: None,
mitre_analysis: None,
};
}
}
},
};
if let Ok(yara_result) = yara_result {

View File

@@ -573,6 +573,7 @@ impl HollowingDetector {
/// PE section information for comparison
#[derive(Debug, Clone)]
#[allow(dead_code)]
struct PESection {
name: String,
virtual_address: usize,
@@ -582,6 +583,7 @@ struct PESection {
}
/// Parse PE sections from a buffer
#[allow(dead_code)]
fn parse_pe_sections(data: &[u8]) -> Result<Vec<PESection>> {
use crate::GhostError;

View File

@@ -294,8 +294,7 @@ impl LiveThreatFeeds {
) {
// Map OTX threat level to our scale
let threat_level = indicator
.get("expiration")
.and_then(|_| Some(4))
.get("expiration").map(|_| 4)
.unwrap_or(3);
iocs.push(CachedIOC {

View File

@@ -1,10 +1,10 @@
///! PE (Portable Executable) file parsing utilities for hook detection.
///!
///! This module provides comprehensive PE parsing capabilities including:
///! - Import Address Table (IAT) extraction
///! - Export Address Table (EAT) extraction
///! - Data directory parsing
///! - Function address resolution
//! PE (Portable Executable) file parsing utilities for hook detection.
//!
//! This module provides comprehensive PE parsing capabilities including:
//! - Import Address Table (IAT) extraction
//! - Export Address Table (EAT) extraction
//! - Data directory parsing
//! - Function address resolution
use crate::{GhostError, Result};
use serde::{Deserialize, Serialize};
@@ -314,6 +314,7 @@ fn parse_iat_from_buffer(buffer: &[u8]) -> Result<Vec<ImportEntry>> {
}
/// Helper to check if two addresses match considering ASLR
#[allow(dead_code)]
fn addresses_match_with_aslr(addr1: usize, addr2: usize) -> bool {
// Simple heuristic: if addresses are in completely different ranges (different modules)
// they don't match. This is a simplified check.

View File

@@ -204,13 +204,198 @@ mod platform {
mod platform {
use super::ProcessInfo;
use anyhow::Result;
use libc::{c_int, c_void, pid_t, size_t};
use std::mem;
use std::ptr;
// sysctl MIB constants for querying the kernel process table
// (values correspond to <sys/sysctl.h> on macOS).
const CTL_KERN: c_int = 1;
const KERN_PROC: c_int = 14;
const KERN_PROC_ALL: c_int = 0;

/// Mirror of the kernel's `struct kinfo_proc` as returned by
/// sysctl(KERN_PROC, KERN_PROC_ALL).
///
/// NOTE(review): these hand-written layouts must match the macOS SDK headers
/// byte-for-byte; several fields below are opaque padded byte arrays, so only
/// the pid/ppid fields should be trusted until the layout is verified against
/// <sys/sysctl.h> / <sys/proc.h>.
#[repr(C)]
struct kinfo_proc {
    kp_proc: extern_proc,
    kp_eproc: eproc,
}

/// Partial mirror of `struct extern_proc`; only `p_pid`/`p_ppid` are read.
#[repr(C)]
struct extern_proc {
    p_un: [u8; 16],
    p_pid: pid_t,  // process id (read by enumerate_processes)
    p_ppid: pid_t, // parent process id (read by enumerate_processes)
    p_pgid: pid_t,
    p_stat: u16,
    p_pad1: [u8; 2],
    p_xstat: u16,
    p_pad2: [u8; 2],
    p_ru: [u8; 144], // opaque rusage area — layout not interpreted here
}

/// Mirror of `struct eproc`; currently unused beyond occupying its bytes
/// so that `kinfo_proc` has the kernel's expected size.
#[repr(C)]
struct eproc {
    e_paddr: u64,
    e_sess: u64,
    e_pcred: pcred,
    e_ucred: ucred,
    e_vm: vmspace,
    e_ppid: pid_t,
    e_pgid: pid_t,
    e_jobc: i16,
    e_tdev: u32,
    e_tpgid: pid_t,
    e_tsess: u64,
    e_wmesg: [u8; 8],
    e_xsize: i32,
    e_xrssize: i16,
    e_xccount: i16,
    e_xswrss: i16,
    e_flag: i32,
    e_login: [u8; 12],
    e_spare: [i32; 4],
}

/// Mirror of `struct pcred` (process credentials); padding only, not read.
#[repr(C)]
struct pcred {
    pc_lock: [u8; 72],
    pc_ucred: u64,
    p_ruid: u32,
    p_svuid: u32,
    p_rgid: u32,
    p_svgid: u32,
    p_refcnt: i32,
}

/// Mirror of `struct ucred` (user credentials); padding only, not read.
#[repr(C)]
struct ucred {
    cr_ref: i32,
    cr_uid: u32,
    cr_ngroups: i16,
    cr_groups: [u32; 16],
}

/// Mirror of `struct vmspace`; padding only, not read.
#[repr(C)]
struct vmspace {
    vm_refcnt: i32,
    vm_shm: u64,
    vm_rssize: i32,
    vm_tsize: i32,
    vm_dsize: i32,
    vm_ssize: i32,
    vm_pad: [u8; 8],
}
/// Enumerate all processes via `sysctl(KERN_PROC, KERN_PROC_ALL)`.
///
/// Two calls are made: one with a null buffer to size the result, then one to
/// fetch the table. The second call can fail transiently if the process table
/// grows between the two calls; callers may retry.
///
/// # Errors
/// Returns an error if either sysctl call fails.
pub fn enumerate_processes() -> Result<Vec<ProcessInfo>> {
    // SAFETY: mib/size/buffer pointers are valid for the duration of each
    // sysctl call, and kinfo_proc is a plain-old-data #[repr(C)] struct for
    // which an all-zero value is valid.
    unsafe {
        let mut mib = [CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0];
        // First call: null buffer, just learn the required size in bytes.
        let mut size: size_t = 0;
        if libc::sysctl(
            mib.as_mut_ptr(),
            3,
            ptr::null_mut(),
            &mut size,
            ptr::null_mut(),
            0,
        ) == -1
        {
            return Err(anyhow::anyhow!("sysctl failed to get process list size"));
        }
        let count = size / mem::size_of::<kinfo_proc>();
        let mut procs: Vec<kinfo_proc> = Vec::with_capacity(count);
        procs.resize_with(count, || mem::zeroed());
        // Second call: fill the buffer; `size` is updated to the bytes written.
        if libc::sysctl(
            mib.as_mut_ptr(),
            3,
            procs.as_mut_ptr() as *mut c_void,
            &mut size,
            ptr::null_mut(),
            0,
        ) == -1
        {
            return Err(anyhow::anyhow!("sysctl failed to get process list"));
        }
        // The kernel may return fewer entries than initially reported.
        let actual_count = size / mem::size_of::<kinfo_proc>();
        procs.truncate(actual_count);
        let mut processes = Vec::with_capacity(actual_count);
        for entry in procs {
            let pid = entry.kp_proc.p_pid as u32;
            let ppid = entry.kp_proc.p_ppid as u32;
            // Fall back to a synthetic name when the argv lookup fails
            // (e.g. permission denied for other users' processes).
            let name = get_process_name(pid).unwrap_or_else(|_| format!("pid_{}", pid));
            let path = get_process_path(pid);
            processes.push(ProcessInfo {
                pid,
                ppid,
                name,
                path,
                // Thread count is not available from this sysctl; default to 1.
                thread_count: 1,
            });
        }
        Ok(processes)
    }
}
/// Resolve a process's short name from its executable path via
/// `sysctl(KERN_PROCARGS2)`.
///
/// Assumes the KERN_PROCARGS2 buffer starts with a 4-byte argc followed by
/// the NUL-terminated executable path — NOTE(review): confirm this layout;
/// paths longer than the 1 KiB buffer will be truncated and may fail.
fn get_process_name(pid: u32) -> Result<String> {
    let mut buffer = [0u8; 1024];
    let mut mib = [CTL_KERN, libc::KERN_PROCARGS2, pid as c_int];
    unsafe {
        let mut size = buffer.len();
        // Require at least 4 bytes so the argc prefix can be read.
        if libc::sysctl(
            mib.as_mut_ptr(),
            3,
            buffer.as_mut_ptr() as *mut c_void,
            &mut size,
            ptr::null_mut(),
            0,
        ) == 0
            && size >= 4
        {
            // argc prefix is skipped; only the path that follows is used.
            let _argc = u32::from_ne_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
            let args_start = 4;
            // The executable path runs up to the first NUL after the prefix.
            if let Some(null_pos) = buffer[args_start..size].iter().position(|&b| b == 0) {
                let path_bytes = &buffer[args_start..args_start + null_pos];
                let path = String::from_utf8_lossy(path_bytes);
                // Last path component is the process name; rsplit always
                // yields at least one element, so this branch always fires.
                if let Some(name) = path.rsplit('/').next() {
                    return Ok(name.to_string());
                }
            }
        }
    }
    Err(anyhow::anyhow!("Failed to get process name"))
}
/// Resolve a process's full executable path via libproc's `proc_pidpath`.
/// Returns `None` when the call fails (e.g. the process exited or access
/// is denied).
fn get_process_path(pid: u32) -> Option<String> {
    // SAFETY: the buffer outlives the call and its length is passed
    // alongside the pointer; proc_pidpath writes at most `size` bytes and
    // returns the number of bytes written (<= 0 on failure).
    unsafe {
        let mut buffer = [0u8; 2048];
        let size = buffer.len() as u32;
        // Declared locally because libproc is not exposed through the libc
        // crate — NOTE(review): confirm linking against libproc succeeds.
        extern "C" {
            fn proc_pidpath(pid: c_int, buffer: *mut c_void, buffersize: u32) -> c_int;
        }
        let ret = proc_pidpath(
            pid as c_int,
            buffer.as_mut_ptr() as *mut c_void,
            size,
        );
        if ret > 0 {
            // `ret` is the path length in bytes; no NUL terminator included.
            let path_bytes = &buffer[..ret as usize];
            Some(String::from_utf8_lossy(path_bytes).to_string())
        } else {
            None
        }
    }
}
}

View File

@@ -1,16 +1,24 @@
use crate::{GhostError, MemoryRegion, ProcessInfo};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::time::SystemTime;
#[cfg(feature = "yara-scanning")]
use std::fs;
#[cfg(feature = "yara-scanning")]
use std::path::Path;
#[cfg(feature = "yara-scanning")]
use yara::{Compiler, Rules};
/// Runtime-loadable YARA scanning engine.
///
/// Serializable so that rule metadata and the scan cache can be persisted;
/// the compiled rule set itself cannot be serialized and is rebuilt from
/// `rules_path` after deserialization.
#[derive(Serialize, Deserialize)]
pub struct DynamicYaraEngine {
    // Directory (or file) the rules are loaded from, if configured.
    rules_path: Option<PathBuf>,
    // Compiled rules; skipped during (de)serialization and recompiled.
    #[serde(skip)]
    #[cfg(feature = "yara-scanning")]
    compiled_rules: Option<Rules>,
    // Placeholder when YARA support is compiled out.
    // NOTE(review): this variant lacks #[serde(skip)], so a unit Option is
    // serialized in non-yara builds — confirm the two build configurations
    // are meant to produce different serialized forms.
    #[cfg(not(feature = "yara-scanning"))]
    compiled_rules: Option<()>,
    // Metadata gathered for each successfully compiled rule file.
    rule_metadata: Vec<YaraRuleMetadata>,
    // Cached scan results — keyed by string; key scheme defined elsewhere
    // in this file (TODO confirm: process/file identifier).
    scan_cache: HashMap<String, CachedScanResult>,
}
@@ -99,24 +107,38 @@ impl DynamicYaraEngine {
pub fn new(rules_path: Option<&str>) -> Result<Self, GhostError> {
let rules_path = rules_path.map(PathBuf::from);
let mut engine = DynamicYaraEngine {
rules_path,
compiled_rules: None,
rule_metadata: Vec::new(),
scan_cache: HashMap::new(),
};
#[cfg(feature = "yara-scanning")]
{
let mut engine = DynamicYaraEngine {
rules_path,
compiled_rules: None,
rule_metadata: Vec::new(),
scan_cache: HashMap::new(),
};
// Attempt to load rules if path is provided
if engine.rules_path.is_some() {
if let Err(e) = engine.compile_rules() {
log::warn!("Failed to compile YARA rules: {:?}", e);
// Attempt to load rules if path is provided
if engine.rules_path.is_some() {
if let Err(e) = engine.compile_rules() {
log::warn!("Failed to compile YARA rules: {:?}", e);
}
}
Ok(engine)
}
Ok(engine)
#[cfg(not(feature = "yara-scanning"))]
{
Ok(DynamicYaraEngine {
rules_path,
compiled_rules: None,
rule_metadata: Vec::new(),
scan_cache: HashMap::new(),
})
}
}
/// Compile all YARA rules from the rules directory
#[cfg(feature = "yara-scanning")]
pub fn compile_rules(&mut self) -> Result<usize, GhostError> {
let rules_dir = self
.rules_path
@@ -153,7 +175,7 @@ impl DynamicYaraEngine {
log::error!("Failed to compile {}: {}", rule_file.display(), e);
continue;
}
log::info!("Compiled YARA rule: {}", rule_file.display());
self.rule_metadata.push(YaraRuleMetadata {
@@ -186,13 +208,21 @@ impl DynamicYaraEngine {
self.compiled_rules = Some(compiled_rules);
self.compiled_rules = Some(compiled_rules);
log::info!("Successfully compiled {} YARA rules", rule_count);
Ok(rule_count)
}
/// Compile all YARA rules from the rules directory (stub for disabled feature)
///
/// # Errors
/// Always returns `GhostError::Configuration`: this stub is compiled in when
/// the `yara-scanning` feature is disabled.
#[cfg(not(feature = "yara-scanning"))]
pub fn compile_rules(&mut self) -> Result<usize, GhostError> {
    Err(GhostError::Configuration {
        message: "YARA scanning is not enabled. Build with --features yara-scanning to enable.".to_string(),
    })
}
/// Find all YARA rule files in the given directory
#[cfg(feature = "yara-scanning")]
#[allow(dead_code)]
fn find_rule_files(dir: &Path) -> Result<Vec<PathBuf>, GhostError> {
let mut rule_files = Vec::new();
@@ -223,6 +253,7 @@ impl DynamicYaraEngine {
}
/// Scan process memory regions with compiled YARA rules
#[cfg(feature = "yara-scanning")]
pub async fn scan_process(
&self,
process: &ProcessInfo,
@@ -291,7 +322,20 @@ impl DynamicYaraEngine {
})
}
/// Scan process memory regions with compiled YARA rules (stub for disabled feature)
///
/// # Errors
/// Always returns `GhostError::Configuration`: this stub is compiled in when
/// the `yara-scanning` feature is disabled; arguments are ignored.
#[cfg(not(feature = "yara-scanning"))]
pub async fn scan_process(
    &self,
    _process: &ProcessInfo,
    _memory_regions: &[MemoryRegion],
) -> Result<YaraScanResult, GhostError> {
    Err(GhostError::Configuration {
        message: "YARA scanning is not enabled. Build with --features yara-scanning to enable.".to_string(),
    })
}
/// Scan a memory buffer with YARA rules
#[cfg(feature = "yara-scanning")]
fn scan_memory_with_yara(
rules: &Rules,
data: &[u8],
@@ -381,9 +425,9 @@ impl DynamicYaraEngine {
buffer.truncate(bytes_read);
Ok(buffer)
} else {
Err(GhostError::MemoryReadError(
"ReadProcessMemory failed".to_string(),
))
Err(GhostError::MemoryEnumeration {
reason: "ReadProcessMemory failed".to_string(),
})
}
}
}
@@ -415,10 +459,11 @@ impl DynamicYaraEngine {
/// Read memory from a specific process and region (macOS implementation)
#[cfg(target_os = "macos")]
#[allow(dead_code)]
fn read_process_memory(_pid: u32, _region: &MemoryRegion) -> Result<Vec<u8>, GhostError> {
Err(GhostError::NotImplemented(
"Memory reading not implemented for macOS".to_string(),
))
Err(GhostError::PlatformNotSupported {
feature: "Memory reading not implemented for macOS".to_string(),
})
}
pub fn get_rule_count(&self) -> usize {

View File

@@ -0,0 +1,202 @@
use ghost_core::{AnomalyDetector, ProcessInfo, MemoryRegion, MemoryProtection};
use std::path::PathBuf;
/// A freshly constructed detector must start with no learned profiles.
#[test]
fn test_anomaly_detector_creation() {
    let detector = AnomalyDetector::new();
    assert!(detector.get_all_profiles().is_empty());
}
/// extract_features must report the pid, total region count, and the number
/// of executable regions from the supplied process/region data.
#[test]
fn test_feature_extraction() {
    let detector = AnomalyDetector::new();
    let process = ProcessInfo {
        pid: 1234,
        ppid: 1,
        name: "test_process".to_string(),
        path: Some("/usr/bin/test".to_string()),
        thread_count: 5,
    };
    // One executable (ReadExecute) region and one writable data region.
    let regions = vec![
        MemoryRegion {
            base_address: 0x1000,
            size: 4096,
            protection: MemoryProtection::ReadExecute,
            region_type: "IMAGE".to_string(),
        },
        MemoryRegion {
            base_address: 0x2000,
            size: 8192,
            protection: MemoryProtection::ReadWrite,
            region_type: "PRIVATE".to_string(),
        },
    ];
    let features = detector.extract_features(&process, &regions, None);
    assert_eq!(features.pid, 1234);
    assert_eq!(features.memory_regions, 2);
    // Only the ReadExecute region counts as executable.
    assert_eq!(features.executable_regions, 1);
}
/// analyze_anomaly must succeed on a first-seen process and return a score
/// and confidence both clamped to the [0, 1] range.
#[test]
fn test_anomaly_analysis() {
    let mut detector = AnomalyDetector::new();
    let process = ProcessInfo {
        pid: 1234,
        ppid: 1,
        name: "test_process".to_string(),
        path: Some("/usr/bin/test".to_string()),
        thread_count: 5,
    };
    let regions = vec![
        MemoryRegion {
            base_address: 0x1000,
            size: 4096,
            protection: MemoryProtection::ReadExecute,
            region_type: "IMAGE".to_string(),
        },
    ];
    let features = detector.extract_features(&process, &regions, None);
    let result = detector.analyze_anomaly(&process, &features);
    assert!(result.is_ok());
    let score = result.unwrap();
    // Both values are normalized scores; anything outside [0, 1] is a bug.
    assert!(score.overall_score >= 0.0 && score.overall_score <= 1.0);
    assert!(score.confidence >= 0.0 && score.confidence <= 1.0);
}
/// Round-trip: train a profile, save it to JSON, and load it into a fresh
/// detector. Uses the platform temp dir with a pid-unique filename so the
/// test is portable off Unix and safe under parallel test runs (previously a
/// fixed "/tmp/ghost_test_profiles.json" path, which collides and is not
/// Windows-compatible).
#[test]
fn test_profile_persistence() {
    let mut detector = AnomalyDetector::new();
    let process = ProcessInfo {
        pid: 1234,
        ppid: 1,
        name: "test_process".to_string(),
        path: Some("/usr/bin/test".to_string()),
        thread_count: 5,
    };
    let regions = vec![
        MemoryRegion {
            base_address: 0x1000,
            size: 4096,
            protection: MemoryProtection::ReadExecute,
            region_type: "IMAGE".to_string(),
        },
    ];
    // Feed enough samples to cross the min-samples threshold for a profile.
    for _ in 0..15 {
        let features = detector.extract_features(&process, &regions, None);
        let _ = detector.analyze_anomaly(&process, &features);
    }
    let temp_path: PathBuf = std::env::temp_dir()
        .join(format!("ghost_test_profiles_{}.json", std::process::id()));
    let save_result = detector.save_profiles(&temp_path);
    assert!(save_result.is_ok(), "Failed to save profiles: {:?}", save_result.err());
    let mut detector2 = AnomalyDetector::new();
    let load_result = detector2.load_profiles(&temp_path);
    assert!(load_result.is_ok(), "Failed to load profiles: {:?}", load_result.err());
    assert!(!detector2.get_all_profiles().is_empty());
    // Best-effort cleanup; failure to remove is not a test failure.
    let _ = std::fs::remove_file(temp_path);
}
/// Train three distinct process profiles, then compute the global baseline.
/// NOTE(review): this only asserts the profile count; the baseline itself is
/// not observable through the public API, so its contents are unchecked.
#[test]
fn test_global_baseline_computation() {
    let mut detector = AnomalyDetector::new();
    for i in 0..3 {
        let process = ProcessInfo {
            pid: 1000 + i,
            ppid: 1,
            name: format!("process_{}", i),
            path: Some(format!("/usr/bin/process_{}", i)),
            thread_count: 5,
        };
        let regions = vec![
            MemoryRegion {
                base_address: 0x1000,
                size: 4096,
                protection: MemoryProtection::ReadExecute,
                region_type: "IMAGE".to_string(),
            },
        ];
        // Enough samples per process to qualify for the baseline.
        for _ in 0..15 {
            let features = detector.extract_features(&process, &regions, None);
            let _ = detector.analyze_anomaly(&process, &features);
        }
    }
    detector.compute_global_baseline();
    // Baseline computation must not consume or alter the per-process profiles.
    assert_eq!(detector.get_all_profiles().len(), 3);
}
/// cleanup_old_profiles(0) uses a cutoff of "now", which is strictly after
/// the profile's last update, so the freshly trained profile must be removed.
#[test]
fn test_profile_cleanup() {
    let mut detector = AnomalyDetector::new();
    let process = ProcessInfo {
        pid: 1234,
        ppid: 1,
        name: "test_process".to_string(),
        path: Some("/usr/bin/test".to_string()),
        thread_count: 5,
    };
    let regions = vec![
        MemoryRegion {
            base_address: 0x1000,
            size: 4096,
            protection: MemoryProtection::ReadExecute,
            region_type: "IMAGE".to_string(),
        },
    ];
    // Build one profile for the process.
    for _ in 0..15 {
        let features = detector.extract_features(&process, &regions, None);
        let _ = detector.analyze_anomaly(&process, &features);
    }
    assert_eq!(detector.get_all_profiles().len(), 1);
    // Max age of 0 hours: everything updated at or before "now" is stale.
    detector.cleanup_old_profiles(0);
    assert_eq!(detector.get_all_profiles().len(), 0);
}
/// An empty region list can produce degenerate features (e.g. divisions by
/// zero counts); the detector must still return finite, valid scores.
#[test]
fn test_nan_guards() {
    let mut detector = AnomalyDetector::new();
    let process = ProcessInfo {
        pid: 1234,
        ppid: 1,
        name: "test_process".to_string(),
        path: Some("/usr/bin/test".to_string()),
        thread_count: 5,
    };
    // Deliberately empty: the worst case for ratio-style features.
    let regions = vec![];
    let features = detector.extract_features(&process, &regions, None);
    let result = detector.analyze_anomaly(&process, &features);
    assert!(result.is_ok());
    let score = result.unwrap();
    // Neither NaN nor infinity may leak out of the analysis.
    assert!(score.overall_score.is_finite());
    assert!(score.confidence.is_finite());
}

View File

@@ -0,0 +1,45 @@
/// Smoke test for the macOS sysctl-based enumerator: the process list must be
/// non-empty, well-formed, and contain launchd (PID 1).
#[cfg(target_os = "macos")]
#[test]
fn test_macos_process_enumeration() {
    use ghost_core::process;
    let processes = process::enumerate_processes().expect("Failed to enumerate processes");
    assert!(!processes.is_empty(), "Should find at least some processes");
    println!("Found {} processes", processes.len());
    // Spot-check a handful of real (pid > 0) entries for basic sanity.
    for proc in processes.iter().filter(|p| p.pid > 0).take(5) {
        println!("PID: {}, Name: {}, Path: {:?}", proc.pid, proc.name, proc.path);
        assert!(proc.pid > 0, "PID should be positive");
        assert!(!proc.name.is_empty(), "Process name should not be empty");
    }
    // The current process may legitimately be missing from the snapshot
    // (race between snapshot and lookup), so this is informational only.
    let current_pid = std::process::id();
    let current_process = processes.iter().find(|p| p.pid == current_pid);
    if let Some(proc) = current_process {
        println!("Current process found: PID={}, Name={}", proc.pid, proc.name);
    } else {
        println!("Current process (PID={}) not in list - this is OK for test processes", current_pid);
    }
    // launchd always exists on macOS, so its absence means enumeration broke.
    assert!(processes.iter().any(|p| p.pid == 1), "Should at least find launchd (PID 1)");
}
/// Structural sanity check on enumerated ProcessInfo values.
///
/// The original assertions `pid > 0 || pid == 0` (always true for u32) and
/// `!name.is_empty() || name.starts_with("pid_")` (second clause implies the
/// first) were tautologies that could never fail; they are replaced with
/// checks that actually constrain the data.
#[cfg(target_os = "macos")]
#[test]
fn test_process_info_structure() {
    use ghost_core::process;
    let processes = process::enumerate_processes().expect("Failed to enumerate processes");
    for proc in processes.iter().take(10) {
        // Every enumerated process must report at least one thread.
        assert!(proc.thread_count >= 1);
        // Real processes must carry a usable name, even if it is only the
        // synthesized "pid_<n>" fallback.
        if proc.pid > 0 {
            assert!(!proc.name.is_empty());
        }
    }
}