refactor(backend): extract config and speedtest services (phase 4)
This commit continues the backend refactoring initiative by extracting configuration management and API speedtest logic into dedicated service layers, completing phase 4 of the architectural improvement plan.

## Changes

### New Service Layers

- **ConfigService** (`services/config.rs`): Consolidates all config import/export, backup management, and live sync operations
  - `create_backup()`: Creates timestamped backups with auto-cleanup
  - `export_config_to_path()`: Exports the config to a specified path
  - `load_config_for_import()`: Loads and validates an imported config
  - `import_config_from_path()`: Full import with state update
  - `sync_current_providers_to_live()`: Syncs current providers to live files
  - Private helpers for Claude/Codex-specific sync logic
- **SpeedtestService** (`services/speedtest.rs`): Encapsulates endpoint latency testing with proper validation and error handling
  - `test_endpoints()`: Tests multiple URLs concurrently (a command-side sketch follows the speedtest diff)
  - URL validation is now unified in the service layer
  - Includes 3 unit tests for edge cases (empty list, invalid URLs, timeout clamping)

### Command Layer Refactoring

- Move all import/export commands to `commands/import_export.rs`
- Commands become thin wrappers: parse params → call service → return JSON (see the sketch directly below this message)
- Maintain `spawn_blocking` for I/O operations (phase 5 optimization)
- Lock acquisition happens after I/O completes (minimizing contention)

### File Organization

- Delete: `import_export.rs`, `speedtest.rs` (root-level modules)
- Create: `commands/import_export.rs`, `services/config.rs`, `services/speedtest.rs`
- Update: module declarations in `lib.rs`, `commands/mod.rs`, `services/mod.rs`

### Test Updates

- Update 20 integration tests in `import_export_sync.rs` to use the `ConfigService` APIs
- All existing test cases pass without changes to their test logic
- Add 3 new unit tests for `SpeedtestService`:
  - `sanitize_timeout_clamps_values`: Boundary value testing
  - `test_endpoints_handles_empty_list`: Empty input handling
  - `test_endpoints_reports_invalid_url`: Invalid URL error reporting

## Benefits

1. **Improved Testability**: Service methods are `pub fn`, easily callable from tests without a Tauri runtime (illustrated after the `ConfigService` diff below)
2. **Better Separation of Concerns**: Business logic is isolated from the command/transport layer
3. **Enhanced Maintainability**: Related operations are grouped in cohesive service structs
4. **Consistent Error Handling**: Services return `Result<T, AppError>`, commands convert to `Result<T, String>`
5. **Performance**: I/O operations run in `spawn_blocking`, locks are released before file operations

## Testing

- ✅ All 43 tests passing (7 unit + 36 integration)
- ✅ `cargo fmt --check` passes
- ✅ `cargo clippy -- -D warnings` passes (zero warnings)

## Documentation

Updated `BACKEND_REFACTOR_PLAN.md` to reflect completion of the config and speedtest service extraction, marking phase 4 substantially complete.

Co-authored-by: Claude Code <code@anthropic.com>
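Since the command-layer files themselves are not part of this excerpt, here is a minimal sketch of the thin-wrapper pattern described above. The command name `import_config_from_file`, its `String` parameter, the `state.config` field access, and the error-to-`String` conversion via `Display` are assumptions for illustration; only `ConfigService::load_config_for_import` and the I/O-before-lock ordering come from this commit.

```rust
// Hypothetical command in commands/import_export.rs (names and signature are illustrative).
use std::path::PathBuf;

use tauri::State;

use crate::services::ConfigService;
use crate::store::AppState;

#[tauri::command]
pub async fn import_config_from_file(
    state: State<'_, AppState>,
    file_path: String,
) -> Result<String, String> {
    let path = PathBuf::from(file_path);

    // Read, parse, back up, and rewrite the config file off the async runtime.
    let (new_config, backup_id) =
        tauri::async_runtime::spawn_blocking(move || ConfigService::load_config_for_import(&path))
            .await
            .map_err(|e| format!("blocking task failed: {e}"))?
            .map_err(|e| e.to_string())?;

    // Only take the config write lock after all file I/O has completed.
    {
        let mut guard = state.config.write().map_err(|e| e.to_string())?;
        *guard = new_config;
    }

    Ok(backup_id)
}
```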
src-tauri/src/services/config.rs (new file, 229 lines)
@@ -0,0 +1,229 @@
use crate::app_config::{AppType, MultiAppConfig};
use crate::error::AppError;
use crate::provider::Provider;
use crate::store::AppState;
use chrono::Utc;
use serde_json::Value;
use std::fs;
use std::path::Path;

const MAX_BACKUPS: usize = 10;

/// Business logic for configuration import/export.
pub struct ConfigService;

impl ConfigService {
    /// Create a backup of the current config.json and return the backup ID
    /// (an empty string if the file does not exist).
    pub fn create_backup(config_path: &Path) -> Result<String, AppError> {
        if !config_path.exists() {
            return Ok(String::new());
        }

        let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
        let backup_id = format!("backup_{}", timestamp);

        let backup_dir = config_path
            .parent()
            .ok_or_else(|| AppError::Config("Invalid config path".into()))?
            .join("backups");

        fs::create_dir_all(&backup_dir).map_err(|e| AppError::io(&backup_dir, e))?;

        let backup_path = backup_dir.join(format!("{}.json", backup_id));
        let contents = fs::read(config_path).map_err(|e| AppError::io(config_path, e))?;
        fs::write(&backup_path, contents).map_err(|e| AppError::io(&backup_path, e))?;

        Self::cleanup_old_backups(&backup_dir, MAX_BACKUPS)?;

        Ok(backup_id)
    }

    fn cleanup_old_backups(backup_dir: &Path, retain: usize) -> Result<(), AppError> {
        if retain == 0 {
            return Ok(());
        }

        let entries = match fs::read_dir(backup_dir) {
            Ok(iter) => iter
                .filter_map(|entry| entry.ok())
                .filter(|entry| {
                    entry
                        .path()
                        .extension()
                        .map(|ext| ext == "json")
                        .unwrap_or(false)
                })
                .collect::<Vec<_>>(),
            Err(_) => return Ok(()),
        };

        if entries.len() <= retain {
            return Ok(());
        }

        let remove_count = entries.len().saturating_sub(retain);
        let mut sorted = entries;

        sorted.sort_by(|a, b| {
            let a_time = a.metadata().and_then(|m| m.modified()).ok();
            let b_time = b.metadata().and_then(|m| m.modified()).ok();
            a_time.cmp(&b_time)
        });

        for entry in sorted.into_iter().take(remove_count) {
            if let Err(err) = fs::remove_file(entry.path()) {
                log::warn!(
                    "Failed to remove old backup {}: {}",
                    entry.path().display(),
                    err
                );
            }
        }

        Ok(())
    }

    /// Copy the current config.json to the target path.
    pub fn export_config_to_path(target_path: &Path) -> Result<(), AppError> {
        let config_path = crate::config::get_app_config_path();
        let config_content =
            fs::read_to_string(&config_path).map_err(|e| AppError::io(&config_path, e))?;
        fs::write(target_path, config_content).map_err(|e| AppError::io(target_path, e))
    }

    /// Load a configuration file from disk and write it back to config.json,
    /// returning the new config and the backup ID.
    pub fn load_config_for_import(file_path: &Path) -> Result<(MultiAppConfig, String), AppError> {
        let import_content =
            fs::read_to_string(file_path).map_err(|e| AppError::io(file_path, e))?;

        let new_config: MultiAppConfig =
            serde_json::from_str(&import_content).map_err(|e| AppError::json(file_path, e))?;

        let config_path = crate::config::get_app_config_path();
        let backup_id = Self::create_backup(&config_path)?;

        fs::write(&config_path, &import_content).map_err(|e| AppError::io(&config_path, e))?;

        Ok((new_config, backup_id))
    }

    /// Load an external configuration file and apply it to the application state.
    pub fn import_config_from_path(file_path: &Path, state: &AppState) -> Result<String, AppError> {
        let (new_config, backup_id) = Self::load_config_for_import(file_path)?;

        {
            let mut guard = state.config.write().map_err(AppError::from)?;
            *guard = new_config;
        }

        Ok(backup_id)
    }

    /// Sync the current provider of each app to its live configuration.
    pub fn sync_current_providers_to_live(config: &mut MultiAppConfig) -> Result<(), AppError> {
        Self::sync_current_provider_for_app(config, &AppType::Claude)?;
        Self::sync_current_provider_for_app(config, &AppType::Codex)?;
        Ok(())
    }

    fn sync_current_provider_for_app(
        config: &mut MultiAppConfig,
        app_type: &AppType,
    ) -> Result<(), AppError> {
        let (current_id, provider) = {
            let manager = match config.get_manager(app_type) {
                Some(manager) => manager,
                None => return Ok(()),
            };

            if manager.current.is_empty() {
                return Ok(());
            }

            let current_id = manager.current.clone();
            let provider = match manager.providers.get(&current_id) {
                Some(provider) => provider.clone(),
                None => {
                    log::warn!(
                        "For app {:?}, current provider {} does not exist; skipping live sync",
                        app_type,
                        current_id
                    );
                    return Ok(());
                }
            };
            (current_id, provider)
        };

        match app_type {
            AppType::Codex => Self::sync_codex_live(config, &current_id, &provider)?,
            AppType::Claude => Self::sync_claude_live(config, &current_id, &provider)?,
        }

        Ok(())
    }

    fn sync_codex_live(
        config: &mut MultiAppConfig,
        provider_id: &str,
        provider: &Provider,
    ) -> Result<(), AppError> {
        let settings = provider.settings_config.as_object().ok_or_else(|| {
            AppError::Config(format!(
                "Codex configuration for provider {} must be an object",
                provider_id
            ))
        })?;
        let auth = settings.get("auth").ok_or_else(|| {
            AppError::Config(format!(
                "Codex configuration for provider {} is missing the auth field",
                provider_id
            ))
        })?;
        if !auth.is_object() {
            return Err(AppError::Config(format!(
                "Codex auth configuration for provider {} must be a JSON object",
                provider_id
            )));
        }
        let cfg_text = settings.get("config").and_then(Value::as_str);

        crate::codex_config::write_codex_live_atomic(auth, cfg_text)?;
        crate::mcp::sync_enabled_to_codex(config)?;

        let cfg_text_after = crate::codex_config::read_and_validate_codex_config_text()?;
        if let Some(manager) = config.get_manager_mut(&AppType::Codex) {
            if let Some(target) = manager.providers.get_mut(provider_id) {
                if let Some(obj) = target.settings_config.as_object_mut() {
                    obj.insert(
                        "config".to_string(),
                        serde_json::Value::String(cfg_text_after),
                    );
                }
            }
        }

        Ok(())
    }

    fn sync_claude_live(
        config: &mut MultiAppConfig,
        provider_id: &str,
        provider: &Provider,
    ) -> Result<(), AppError> {
        use crate::config::{read_json_file, write_json_file};

        let settings_path = crate::config::get_claude_settings_path();
        if let Some(parent) = settings_path.parent() {
            fs::create_dir_all(parent).map_err(|e| AppError::io(parent, e))?;
        }

        write_json_file(&settings_path, &provider.settings_config)?;

        let live_after = read_json_file::<serde_json::Value>(&settings_path)?;
        if let Some(manager) = config.get_manager_mut(&AppType::Claude) {
            if let Some(target) = manager.providers.get_mut(provider_id) {
                target.settings_config = live_after;
            }
        }

        Ok(())
    }
}
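As a concrete illustration of the testability benefit above, service methods can be driven directly from a plain `#[test]` with no Tauri runtime. This sketch assumes the library crate is importable as `cc_switch` and that `tempfile` is available as a dev-dependency; neither is shown in this diff.

```rust
use std::fs;

// Crate name is assumed; adjust to the actual library crate name of src-tauri.
use cc_switch::services::ConfigService;

#[test]
fn create_backup_writes_timestamped_copy() {
    let dir = tempfile::tempdir().expect("temp dir");
    let config_path = dir.path().join("config.json");
    fs::write(&config_path, r#"{"version": 2}"#).expect("write config");

    let backup_id = ConfigService::create_backup(&config_path).expect("backup should succeed");
    assert!(backup_id.starts_with("backup_"));

    // Backups land in a `backups/` directory next to config.json, named `<backup_id>.json`.
    let backup_file = dir.path().join("backups").join(format!("{backup_id}.json"));
    assert_eq!(
        fs::read_to_string(backup_file).expect("read backup"),
        r#"{"version": 2}"#
    );
}
```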
src-tauri/src/services/mod.rs
@@ -1,5 +1,9 @@
pub mod config;
pub mod mcp;
pub mod provider;
pub mod speedtest;

pub use config::ConfigService;
pub use mcp::McpService;
pub use provider::ProviderService;
pub use speedtest::{EndpointLatency, SpeedtestService};
src-tauri/src/services/speedtest.rs (new file, 168 lines)
@@ -0,0 +1,168 @@
use futures::future::join_all;
use reqwest::{Client, Url};
use serde::Serialize;
use std::time::{Duration, Instant};

use crate::error::AppError;

const DEFAULT_TIMEOUT_SECS: u64 = 8;
const MAX_TIMEOUT_SECS: u64 = 30;
const MIN_TIMEOUT_SECS: u64 = 2;

/// Latency test result for a single endpoint.
#[derive(Debug, Clone, Serialize)]
pub struct EndpointLatency {
    pub url: String,
    pub latency: Option<u128>,
    pub status: Option<u16>,
    pub error: Option<String>,
}

/// Business logic for network speed testing.
pub struct SpeedtestService;

impl SpeedtestService {
    /// Test the response latency of a list of endpoints.
    pub async fn test_endpoints(
        urls: Vec<String>,
        timeout_secs: Option<u64>,
    ) -> Result<Vec<EndpointLatency>, AppError> {
        if urls.is_empty() {
            return Ok(vec![]);
        }

        let timeout = Self::sanitize_timeout(timeout_secs);
        let client = Self::build_client(timeout)?;

        let tasks = urls.into_iter().map(|raw_url| {
            let client = client.clone();
            async move {
                let trimmed = raw_url.trim().to_string();
                if trimmed.is_empty() {
                    return EndpointLatency {
                        url: raw_url,
                        latency: None,
                        status: None,
                        error: Some("URL must not be empty".to_string()),
                    };
                }

                let parsed_url = match Url::parse(&trimmed) {
                    Ok(url) => url,
                    Err(err) => {
                        return EndpointLatency {
                            url: trimmed,
                            latency: None,
                            status: None,
                            error: Some(format!("Invalid URL: {err}")),
                        };
                    }
                };

                // Send a warm-up request first and ignore its result; it only serves to
                // reuse the connection and avoid the first-request penalty.
                let _ = client.get(parsed_url.clone()).send().await;

                // Time the second request and return it as the reported result.
                let start = Instant::now();
                match client.get(parsed_url).send().await {
                    Ok(resp) => EndpointLatency {
                        url: trimmed,
                        latency: Some(start.elapsed().as_millis()),
                        status: Some(resp.status().as_u16()),
                        error: None,
                    },
                    Err(err) => {
                        let status = err.status().map(|s| s.as_u16());
                        let error_message = if err.is_timeout() {
                            "Request timed out".to_string()
                        } else if err.is_connect() {
                            "Connection failed".to_string()
                        } else {
                            err.to_string()
                        };

                        EndpointLatency {
                            url: trimmed,
                            latency: None,
                            status,
                            error: Some(error_message),
                        }
                    }
                }
            }
        });

        Ok(join_all(tasks).await)
    }

    fn build_client(timeout_secs: u64) -> Result<Client, AppError> {
        Client::builder()
            .timeout(Duration::from_secs(timeout_secs))
            .redirect(reqwest::redirect::Policy::limited(5))
            .user_agent("cc-switch-speedtest/1.0")
            .build()
            .map_err(|e| AppError::Message(format!("Failed to create HTTP client: {e}")))
    }

    fn sanitize_timeout(timeout_secs: Option<u64>) -> u64 {
        let secs = timeout_secs.unwrap_or(DEFAULT_TIMEOUT_SECS);
        secs.clamp(MIN_TIMEOUT_SECS, MAX_TIMEOUT_SECS)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn sanitize_timeout_clamps_values() {
        assert_eq!(
            SpeedtestService::sanitize_timeout(Some(1)),
            MIN_TIMEOUT_SECS
        );
        assert_eq!(
            SpeedtestService::sanitize_timeout(Some(999)),
            MAX_TIMEOUT_SECS
        );
        assert_eq!(
            SpeedtestService::sanitize_timeout(Some(10)),
            10.min(MAX_TIMEOUT_SECS).max(MIN_TIMEOUT_SECS)
        );
        assert_eq!(
            SpeedtestService::sanitize_timeout(None),
            DEFAULT_TIMEOUT_SECS
        );
    }

    #[test]
    fn test_endpoints_handles_empty_list() {
        let result =
            tauri::async_runtime::block_on(SpeedtestService::test_endpoints(Vec::new(), Some(5)))
                .expect("empty list should succeed");
        assert!(result.is_empty());
    }

    #[test]
    fn test_endpoints_reports_invalid_url() {
        let result = tauri::async_runtime::block_on(SpeedtestService::test_endpoints(
            vec!["not a url".into(), "".into()],
            None,
        ))
        .expect("invalid inputs should still succeed");

        assert_eq!(result.len(), 2);
        assert!(
            result[0]
                .error
                .as_deref()
                .unwrap_or_default()
                .starts_with("Invalid URL"),
            "invalid url should yield parse error"
        );
        assert_eq!(
            result[1].error.as_deref(),
            Some("URL must not be empty"),
            "empty url should report validation error"
        );
    }
}
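On the command side, the speedtest wrapper can stay fully async, since `test_endpoints` performs no blocking I/O and therefore needs no `spawn_blocking`. A minimal sketch; the command name `test_api_endpoints` and the `Display`-based conversion of `AppError` to `String` are assumptions, only `SpeedtestService::test_endpoints` comes from this commit.

```rust
// Hypothetical thin wrapper in the command layer.
use crate::services::{EndpointLatency, SpeedtestService};

#[tauri::command]
pub async fn test_api_endpoints(
    urls: Vec<String>,
    timeout_secs: Option<u64>,
) -> Result<Vec<EndpointLatency>, String> {
    SpeedtestService::test_endpoints(urls, timeout_secs)
        .await
        .map_err(|e| e.to_string())
}
```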