refactor(backend): extract config and speedtest services (phase 4)

This commit continues the backend refactoring initiative by extracting
configuration management and API speedtest logic into dedicated service
layers, completing phase 4 of the architectural improvement plan.

## Changes

### New Service Layers
- **ConfigService** (`services/config.rs`): Consolidates all config
  import/export, backup management, and live sync operations
  - `create_backup()`: Creates timestamped backups with auto-cleanup
  - `export_config_to_path()`: Exports config to specified path
  - `load_config_for_import()`: Loads and validates imported config
  - `import_config_from_path()`: Full import with state update
  - `sync_current_providers_to_live()`: Syncs current providers to live files
  - Private helpers for Claude/Codex-specific sync logic

- **SpeedtestService** (`services/speedtest.rs`): Encapsulates endpoint
  latency testing with proper validation and error handling
  - `test_endpoints()`: Tests multiple URLs concurrently
  - URL validation is now unified in the service layer (callers pass raw input through; see the sketch after this list)
  - Includes 3 unit tests for edge cases (empty list, invalid URLs, timeout clamping)
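
A minimal usage sketch of the new service (the `probe` helper is illustrative and not part of this diff; the real call site is the `test_api_endpoints` command further down):

```rust
use cc_switch_lib::SpeedtestService;

// Hypothetical caller: trimming, URL parsing, and timeout clamping now live
// inside the service, so raw user input can be passed straight through.
async fn probe(urls: Vec<String>) -> Result<(), String> {
    let results = SpeedtestService::test_endpoints(urls, Some(5))
        .await
        .map_err(|e| e.to_string())?;
    for r in results {
        println!("{}: {:?} ms, status {:?}, error {:?}", r.url, r.latency, r.status, r.error);
    }
    Ok(())
}
```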

### Command Layer Refactoring
- Move all import/export commands to `commands/import_export.rs`
- Commands become thin wrappers: parse params → call service → return JSON (pattern sketched after this list)
- Maintain `spawn_blocking` for I/O operations (phase 5 optimization)
- Lock acquisition happens after I/O completes, to minimize contention
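
The resulting command shape, condensed from the new `commands/import_export.rs` shown in the diff below (JSON payload and error messages trimmed):

```rust
#[tauri::command]
pub async fn export_config_to_file(file_path: String) -> Result<Value, String> {
    // Thin wrapper: run blocking file I/O off the async runtime, delegate the
    // real work to the service, then convert AppError at the transport boundary.
    tauri::async_runtime::spawn_blocking(move || {
        ConfigService::export_config_to_path(&PathBuf::from(&file_path))?;
        Ok::<_, AppError>(json!({ "success": true, "filePath": file_path }))
    })
    .await
    .map_err(|e| e.to_string())?          // the blocking task itself failed
    .map_err(|e: AppError| e.to_string()) // the service returned an error
}
```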

### File Organization
- Delete: `import_export.rs`, `speedtest.rs` (root-level modules)
- Create: `commands/import_export.rs`, `services/config.rs`, `services/speedtest.rs`
- Update: Module declarations in `lib.rs`, `commands/mod.rs`, `services/mod.rs`

### Test Updates
- Update 20 integration tests in `import_export_sync.rs` to use `ConfigService` APIs
- All existing test cases pass without modification to test logic
- Add 3 new unit tests for `SpeedtestService`:
  - `sanitize_timeout_clamps_values`: Boundary value testing
  - `test_endpoints_handles_empty_list`: Empty input handling
  - `test_endpoints_reports_invalid_url`: Invalid URL error reporting

## Benefits

1. **Improved Testability**: Service methods are `pub fn`, easily callable
   from tests without the Tauri runtime (see the example after this list)
2. **Better Separation of Concerns**: Business logic isolated from
   command/transport layer
3. **Enhanced Maintainability**: Related operations grouped in cohesive
   service structs
4. **Consistent Error Handling**: Services return `Result<T, AppError>`,
   commands convert to `Result<T, String>`
5. **Performance**: I/O operations run in `spawn_blocking`, locks released
   before file operations
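
For example, a plain unit test can now exercise `ConfigService` directly, as sketched below (modeled on the updated integration tests; the hard-coded path is illustrative, the real tests run against a sandboxed home directory):

```rust
use cc_switch_lib::ConfigService;

#[test]
fn backup_skips_missing_file() {
    // No Tauri runtime or State<AppState> required: the service method is a plain pub fn.
    let missing = std::path::Path::new("/tmp/cc-switch-does-not-exist/config.json");
    let backup_id = ConfigService::create_backup(missing).expect("missing file is not an error");
    assert!(backup_id.is_empty(), "expected an empty backup id when config.json is absent");
}
```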

## Testing

- All 43 tests passing (7 unit + 36 integration)
- `cargo fmt --check` passes
- `cargo clippy -- -D warnings` passes (zero warnings)

## Documentation

Updated `BACKEND_REFACTOR_PLAN.md` to reflect completion of config and
speedtest service extraction, marking phase 4 substantially complete.

Co-authored-by: Claude Code <code@anthropic.com>

View File

@@ -0,0 +1,104 @@
#![allow(non_snake_case)]
use serde_json::{json, Value};
use std::path::PathBuf;
use tauri::State;
use tauri_plugin_dialog::DialogExt;
use crate::error::AppError;
use crate::services::ConfigService;
use crate::store::AppState;
/// Export the configuration file
#[tauri::command]
pub async fn export_config_to_file(file_path: String) -> Result<Value, String> {
tauri::async_runtime::spawn_blocking(move || {
let target_path = PathBuf::from(&file_path);
ConfigService::export_config_to_path(&target_path)?;
Ok::<_, AppError>(json!({
"success": true,
"message": "Configuration exported successfully",
"filePath": file_path
}))
})
.await
.map_err(|e| format!("导出配置失败: {}", e))?
.map_err(|e: AppError| e.to_string())
}
/// Import configuration from a file
#[tauri::command]
pub async fn import_config_from_file(
file_path: String,
state: State<'_, AppState>,
) -> Result<Value, String> {
let (new_config, backup_id) = tauri::async_runtime::spawn_blocking(move || {
let path_buf = PathBuf::from(&file_path);
ConfigService::load_config_for_import(&path_buf)
})
.await
.map_err(|e| format!("导入配置失败: {}", e))?
.map_err(|e: AppError| e.to_string())?;
{
let mut guard = state
.config
.write()
.map_err(|e| AppError::from(e).to_string())?;
*guard = new_config;
}
Ok(json!({
"success": true,
"message": "Configuration imported successfully",
"backupId": backup_id
}))
}
/// Sync the current providers' configuration to the corresponding live files
#[tauri::command]
pub async fn sync_current_providers_live(state: State<'_, AppState>) -> Result<Value, String> {
{
let mut config_state = state
.config
.write()
.map_err(|e| AppError::from(e).to_string())?;
ConfigService::sync_current_providers_to_live(&mut config_state)
.map_err(|e| e.to_string())?;
}
Ok(json!({
"success": true,
"message": "Live configuration synchronized"
}))
}
/// Save file dialog
#[tauri::command]
pub async fn save_file_dialog<R: tauri::Runtime>(
app: tauri::AppHandle<R>,
default_name: String,
) -> Result<Option<String>, String> {
let dialog = app.dialog();
let result = dialog
.file()
.add_filter("JSON", &["json"])
.set_file_name(&default_name)
.blocking_save_file();
Ok(result.map(|p| p.to_string()))
}
/// Open file dialog
#[tauri::command]
pub async fn open_file_dialog<R: tauri::Runtime>(
app: tauri::AppHandle<R>,
) -> Result<Option<String>, String> {
let dialog = app.dialog();
let result = dialog
.file()
.add_filter("JSON", &["json"])
.blocking_pick_file();
Ok(result.map(|p| p.to_string()))
}

View File

@@ -1,6 +1,7 @@
#![allow(non_snake_case)]
mod config;
mod import_export;
mod mcp;
mod misc;
mod plugin;
@@ -8,6 +9,7 @@ mod provider;
mod settings;
pub use config::*;
pub use import_export::*;
pub use mcp::*;
pub use misc::*;
pub use plugin::*;

View File

@@ -10,8 +10,7 @@ use crate::codex_config;
use crate::config::get_claude_settings_path;
use crate::error::AppError;
use crate::provider::{Provider, ProviderMeta};
use crate::services::ProviderService;
use crate::speedtest;
use crate::services::{EndpointLatency, ProviderService, SpeedtestService};
use crate::store::AppState;
fn validate_provider_settings(app_type: &AppType, provider: &Provider) -> Result<(), String> {
@@ -572,12 +571,8 @@ pub async fn read_live_provider_settings(
pub async fn test_api_endpoints(
urls: Vec<String>,
timeout_secs: Option<u64>,
) -> Result<Vec<speedtest::EndpointLatency>, String> {
let filtered: Vec<String> = urls
.into_iter()
.filter(|url| !url.trim().is_empty())
.collect();
speedtest::test_endpoints(filtered, timeout_secs)
) -> Result<Vec<EndpointLatency>, String> {
SpeedtestService::test_endpoints(urls, timeout_secs)
.await
.map_err(|e| e.to_string())
}

View File

@@ -1,323 +0,0 @@
use crate::app_config::{AppType, MultiAppConfig};
use crate::error::AppError;
use crate::provider::Provider;
use chrono::Utc;
use serde_json::{json, Value};
use std::fs;
use std::path::{Path, PathBuf};
// By default keep only the 10 most recent backups to avoid unbounded growth of the backups directory
const MAX_BACKUPS: usize = 10;
/// Create a backup of the configuration file
pub fn create_backup(config_path: &PathBuf) -> Result<String, AppError> {
if !config_path.exists() {
return Ok(String::new());
}
let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
let backup_id = format!("backup_{}", timestamp);
let backup_dir = config_path
.parent()
.ok_or_else(|| AppError::Config("Invalid config path".into()))?
.join("backups");
// Create the backup directory
fs::create_dir_all(&backup_dir).map_err(|e| AppError::io(&backup_dir, e))?;
let backup_path = backup_dir.join(format!("{}.json", backup_id));
let contents = fs::read(config_path).map_err(|e| AppError::io(config_path, e))?;
fs::write(&backup_path, contents).map_err(|e| AppError::io(&backup_path, e))?;
// After the backup completes, clean up old backup files (keep only the most recent MAX_BACKUPS)
cleanup_old_backups(&backup_dir, MAX_BACKUPS)?;
Ok(backup_id)
}
fn cleanup_old_backups(backup_dir: &PathBuf, retain: usize) -> Result<(), AppError> {
if retain == 0 {
return Ok(());
}
let mut entries: Vec<_> = match fs::read_dir(backup_dir) {
Ok(iter) => iter
.filter_map(|entry| entry.ok())
.filter(|entry| {
entry
.path()
.extension()
.map(|ext| ext == "json")
.unwrap_or(false)
})
.collect(),
Err(_) => return Ok(()),
};
if entries.len() <= retain {
return Ok(());
}
let remove_count = entries.len().saturating_sub(retain);
entries.sort_by(|a, b| {
let a_time = a.metadata().and_then(|m| m.modified()).ok();
let b_time = b.metadata().and_then(|m| m.modified()).ok();
a_time.cmp(&b_time)
});
for entry in entries.into_iter().take(remove_count) {
if let Err(err) = fs::remove_file(entry.path()) {
log::warn!(
"Failed to remove old backup {}: {}",
entry.path().display(),
err
);
}
}
Ok(())
}
pub fn sync_current_providers_to_live(config: &mut MultiAppConfig) -> Result<(), AppError> {
sync_current_provider_for_app(config, &AppType::Claude)?;
sync_current_provider_for_app(config, &AppType::Codex)?;
Ok(())
}
fn sync_current_provider_for_app(
config: &mut MultiAppConfig,
app_type: &AppType,
) -> Result<(), AppError> {
let (current_id, provider) = {
let manager = match config.get_manager(app_type) {
Some(manager) => manager,
None => return Ok(()),
};
if manager.current.is_empty() {
return Ok(());
}
let current_id = manager.current.clone();
let provider = match manager.providers.get(&current_id) {
Some(provider) => provider.clone(),
None => {
log::warn!(
"当前应用 {:?} 的供应商 {} 不存在,跳过 live 同步",
app_type,
current_id
);
return Ok(());
}
};
(current_id, provider)
};
match app_type {
AppType::Codex => sync_codex_live(config, &current_id, &provider)?,
AppType::Claude => sync_claude_live(config, &current_id, &provider)?,
}
Ok(())
}
fn sync_codex_live(
config: &mut MultiAppConfig,
provider_id: &str,
provider: &Provider,
) -> Result<(), AppError> {
use serde_json::Value;
let settings = provider.settings_config.as_object().ok_or_else(|| {
AppError::Config(format!("供应商 {} 的 Codex 配置必须是对象", provider_id))
})?;
let auth = settings.get("auth").ok_or_else(|| {
AppError::Config(format!(
"供应商 {} 的 Codex 配置缺少 auth 字段",
provider_id
))
})?;
if !auth.is_object() {
return Err(AppError::Config(format!(
"供应商 {} 的 Codex auth 配置必须是 JSON 对象",
provider_id
)));
}
let cfg_text = settings.get("config").and_then(Value::as_str);
crate::codex_config::write_codex_live_atomic(auth, cfg_text)?;
crate::mcp::sync_enabled_to_codex(config)?;
let cfg_text_after = crate::codex_config::read_and_validate_codex_config_text()?;
if let Some(manager) = config.get_manager_mut(&AppType::Codex) {
if let Some(target) = manager.providers.get_mut(provider_id) {
if let Some(obj) = target.settings_config.as_object_mut() {
obj.insert(
"config".to_string(),
serde_json::Value::String(cfg_text_after),
);
}
}
}
Ok(())
}
fn sync_claude_live(
config: &mut MultiAppConfig,
provider_id: &str,
provider: &Provider,
) -> Result<(), AppError> {
use crate::config::{read_json_file, write_json_file};
let settings_path = crate::config::get_claude_settings_path();
if let Some(parent) = settings_path.parent() {
std::fs::create_dir_all(parent).map_err(|e| AppError::io(parent, e))?;
}
write_json_file(&settings_path, &provider.settings_config)?;
let live_after = read_json_file::<serde_json::Value>(&settings_path)?;
if let Some(manager) = config.get_manager_mut(&AppType::Claude) {
if let Some(target) = manager.providers.get_mut(provider_id) {
target.settings_config = live_after;
}
}
Ok(())
}
/// Export the configuration file
#[tauri::command]
pub async fn export_config_to_file(file_path: String) -> Result<Value, String> {
tauri::async_runtime::spawn_blocking(move || {
let config_path = crate::config::get_app_config_path();
let config_content =
fs::read_to_string(&config_path).map_err(|e| AppError::io(&config_path, e))?;
let target_path = PathBuf::from(&file_path);
fs::write(&target_path, &config_content).map_err(|e| AppError::io(&target_path, e))?;
Ok::<_, AppError>(json!({
"success": true,
"message": "Configuration exported successfully",
"filePath": file_path
}))
})
.await
.map_err(|e| format!("导出配置失败: {}", e))?
.map_err(|e: AppError| e.to_string())
}
/// Import configuration from a file
#[tauri::command]
pub async fn import_config_from_file(
file_path: String,
state: tauri::State<'_, crate::store::AppState>,
) -> Result<Value, String> {
let path_buf = PathBuf::from(&file_path);
let (new_config, backup_id) =
tauri::async_runtime::spawn_blocking(move || load_config_for_import(&path_buf))
.await
.map_err(|e| format!("导入配置失败: {}", e))?
.map_err(|e| e.to_string())?;
{
let mut guard = state
.config
.write()
.map_err(|e| AppError::from(e).to_string())?;
*guard = new_config;
}
Ok(json!({
"success": true,
"message": "Configuration imported successfully",
"backupId": backup_id
}))
}
/// Core logic for importing configuration from a file, shared by the command and the tests.
pub fn import_config_from_path(
file_path: &Path,
state: &crate::store::AppState,
) -> Result<String, AppError> {
let (new_config, backup_id) = load_config_for_import(file_path)?;
{
let mut guard = state.config.write().map_err(AppError::from)?;
*guard = new_config;
}
Ok(backup_id)
}
fn load_config_for_import(file_path: &Path) -> Result<(MultiAppConfig, String), AppError> {
let import_content = fs::read_to_string(file_path).map_err(|e| AppError::io(file_path, e))?;
let new_config: crate::app_config::MultiAppConfig =
serde_json::from_str(&import_content).map_err(|e| AppError::json(file_path, e))?;
let config_path = crate::config::get_app_config_path();
let backup_id = create_backup(&config_path)?;
fs::write(&config_path, &import_content).map_err(|e| AppError::io(&config_path, e))?;
Ok((new_config, backup_id))
}
/// Sync the current providers' configuration to the corresponding live files
#[tauri::command]
pub async fn sync_current_providers_live(
state: tauri::State<'_, crate::store::AppState>,
) -> Result<Value, String> {
{
let mut config_state = state
.config
.write()
.map_err(|e| AppError::from(e).to_string())?;
sync_current_providers_to_live(&mut config_state).map_err(|e| e.to_string())?;
}
Ok(json!({
"success": true,
"message": "Live configuration synchronized"
}))
}
/// Save file dialog
#[tauri::command]
pub async fn save_file_dialog<R: tauri::Runtime>(
app: tauri::AppHandle<R>,
default_name: String,
) -> Result<Option<String>, String> {
use tauri_plugin_dialog::DialogExt;
let dialog = app.dialog();
let result = dialog
.file()
.add_filter("JSON", &["json"])
.set_file_name(&default_name)
.blocking_save_file();
Ok(result.map(|p| p.to_string()))
}
/// Open file dialog
#[tauri::command]
pub async fn open_file_dialog<R: tauri::Runtime>(
app: tauri::AppHandle<R>,
) -> Result<Option<String>, String> {
use tauri_plugin_dialog::DialogExt;
let dialog = app.dialog();
let result = dialog
.file()
.add_filter("JSON", &["json"])
.blocking_pick_file();
Ok(result.map(|p| p.to_string()))
}

View File

@@ -6,13 +6,11 @@ mod codex_config;
mod commands;
mod config;
mod error;
mod import_export;
mod mcp;
mod migration;
mod provider;
mod services;
mod settings;
mod speedtest;
mod store;
mod usage_script;
@@ -21,14 +19,11 @@ pub use codex_config::{get_codex_auth_path, get_codex_config_path, write_codex_l
pub use commands::*;
pub use config::{get_claude_mcp_path, get_claude_settings_path, read_json_file};
pub use error::AppError;
pub use import_export::{
create_backup, export_config_to_file, import_config_from_path, sync_current_providers_to_live,
};
pub use mcp::{
import_from_claude, import_from_codex, sync_enabled_to_claude, sync_enabled_to_codex,
};
pub use provider::Provider;
pub use services::{McpService, ProviderService};
pub use services::{ConfigService, EndpointLatency, McpService, ProviderService, SpeedtestService};
pub use settings::{update_settings, AppSettings};
pub use store::AppState;
@@ -526,11 +521,11 @@ pub fn run() {
// provider sort order management
commands::update_providers_sort_order,
// theirs: config import/export and dialogs
import_export::export_config_to_file,
import_export::import_config_from_file,
import_export::save_file_dialog,
import_export::open_file_dialog,
import_export::sync_current_providers_live,
commands::export_config_to_file,
commands::import_config_from_file,
commands::save_file_dialog,
commands::open_file_dialog,
commands::sync_current_providers_live,
update_tray_menu,
]);

View File

@@ -0,0 +1,229 @@
use crate::app_config::{AppType, MultiAppConfig};
use crate::error::AppError;
use crate::provider::Provider;
use crate::store::AppState;
use chrono::Utc;
use serde_json::Value;
use std::fs;
use std::path::Path;
const MAX_BACKUPS: usize = 10;
/// Business logic for configuration import/export
pub struct ConfigService;
impl ConfigService {
/// Create a backup of the current config.json and return its backup ID; returns an empty string if the file does not exist
pub fn create_backup(config_path: &Path) -> Result<String, AppError> {
if !config_path.exists() {
return Ok(String::new());
}
let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
let backup_id = format!("backup_{}", timestamp);
let backup_dir = config_path
.parent()
.ok_or_else(|| AppError::Config("Invalid config path".into()))?
.join("backups");
fs::create_dir_all(&backup_dir).map_err(|e| AppError::io(&backup_dir, e))?;
let backup_path = backup_dir.join(format!("{}.json", backup_id));
let contents = fs::read(config_path).map_err(|e| AppError::io(config_path, e))?;
fs::write(&backup_path, contents).map_err(|e| AppError::io(&backup_path, e))?;
Self::cleanup_old_backups(&backup_dir, MAX_BACKUPS)?;
Ok(backup_id)
}
fn cleanup_old_backups(backup_dir: &Path, retain: usize) -> Result<(), AppError> {
if retain == 0 {
return Ok(());
}
let entries = match fs::read_dir(backup_dir) {
Ok(iter) => iter
.filter_map(|entry| entry.ok())
.filter(|entry| {
entry
.path()
.extension()
.map(|ext| ext == "json")
.unwrap_or(false)
})
.collect::<Vec<_>>(),
Err(_) => return Ok(()),
};
if entries.len() <= retain {
return Ok(());
}
let remove_count = entries.len().saturating_sub(retain);
let mut sorted = entries;
sorted.sort_by(|a, b| {
let a_time = a.metadata().and_then(|m| m.modified()).ok();
let b_time = b.metadata().and_then(|m| m.modified()).ok();
a_time.cmp(&b_time)
});
for entry in sorted.into_iter().take(remove_count) {
if let Err(err) = fs::remove_file(entry.path()) {
log::warn!(
"Failed to remove old backup {}: {}",
entry.path().display(),
err
);
}
}
Ok(())
}
/// Copy the current config.json to the target path.
pub fn export_config_to_path(target_path: &Path) -> Result<(), AppError> {
let config_path = crate::config::get_app_config_path();
let config_content =
fs::read_to_string(&config_path).map_err(|e| AppError::io(&config_path, e))?;
fs::write(target_path, config_content).map_err(|e| AppError::io(target_path, e))
}
/// Load configuration from a file on disk and write it back to config.json; returns the new config and the backup ID.
pub fn load_config_for_import(file_path: &Path) -> Result<(MultiAppConfig, String), AppError> {
let import_content =
fs::read_to_string(file_path).map_err(|e| AppError::io(file_path, e))?;
let new_config: MultiAppConfig =
serde_json::from_str(&import_content).map_err(|e| AppError::json(file_path, e))?;
let config_path = crate::config::get_app_config_path();
let backup_id = Self::create_backup(&config_path)?;
fs::write(&config_path, &import_content).map_err(|e| AppError::io(&config_path, e))?;
Ok((new_config, backup_id))
}
/// Load an external configuration file and write it into the application state.
pub fn import_config_from_path(file_path: &Path, state: &AppState) -> Result<String, AppError> {
let (new_config, backup_id) = Self::load_config_for_import(file_path)?;
{
let mut guard = state.config.write().map_err(AppError::from)?;
*guard = new_config;
}
Ok(backup_id)
}
/// Sync the current providers to their corresponding live configurations.
pub fn sync_current_providers_to_live(config: &mut MultiAppConfig) -> Result<(), AppError> {
Self::sync_current_provider_for_app(config, &AppType::Claude)?;
Self::sync_current_provider_for_app(config, &AppType::Codex)?;
Ok(())
}
fn sync_current_provider_for_app(
config: &mut MultiAppConfig,
app_type: &AppType,
) -> Result<(), AppError> {
let (current_id, provider) = {
let manager = match config.get_manager(app_type) {
Some(manager) => manager,
None => return Ok(()),
};
if manager.current.is_empty() {
return Ok(());
}
let current_id = manager.current.clone();
let provider = match manager.providers.get(&current_id) {
Some(provider) => provider.clone(),
None => {
log::warn!(
"当前应用 {:?} 的供应商 {} 不存在,跳过 live 同步",
app_type,
current_id
);
return Ok(());
}
};
(current_id, provider)
};
match app_type {
AppType::Codex => Self::sync_codex_live(config, &current_id, &provider)?,
AppType::Claude => Self::sync_claude_live(config, &current_id, &provider)?,
}
Ok(())
}
fn sync_codex_live(
config: &mut MultiAppConfig,
provider_id: &str,
provider: &Provider,
) -> Result<(), AppError> {
let settings = provider.settings_config.as_object().ok_or_else(|| {
AppError::Config(format!("供应商 {} 的 Codex 配置必须是对象", provider_id))
})?;
let auth = settings.get("auth").ok_or_else(|| {
AppError::Config(format!(
"供应商 {} 的 Codex 配置缺少 auth 字段",
provider_id
))
})?;
if !auth.is_object() {
return Err(AppError::Config(format!(
"供应商 {} 的 Codex auth 配置必须是 JSON 对象",
provider_id
)));
}
let cfg_text = settings.get("config").and_then(Value::as_str);
crate::codex_config::write_codex_live_atomic(auth, cfg_text)?;
crate::mcp::sync_enabled_to_codex(config)?;
let cfg_text_after = crate::codex_config::read_and_validate_codex_config_text()?;
if let Some(manager) = config.get_manager_mut(&AppType::Codex) {
if let Some(target) = manager.providers.get_mut(provider_id) {
if let Some(obj) = target.settings_config.as_object_mut() {
obj.insert(
"config".to_string(),
serde_json::Value::String(cfg_text_after),
);
}
}
}
Ok(())
}
fn sync_claude_live(
config: &mut MultiAppConfig,
provider_id: &str,
provider: &Provider,
) -> Result<(), AppError> {
use crate::config::{read_json_file, write_json_file};
let settings_path = crate::config::get_claude_settings_path();
if let Some(parent) = settings_path.parent() {
fs::create_dir_all(parent).map_err(|e| AppError::io(parent, e))?;
}
write_json_file(&settings_path, &provider.settings_config)?;
let live_after = read_json_file::<serde_json::Value>(&settings_path)?;
if let Some(manager) = config.get_manager_mut(&AppType::Claude) {
if let Some(target) = manager.providers.get_mut(provider_id) {
target.settings_config = live_after;
}
}
Ok(())
}
}

View File

@@ -1,5 +1,9 @@
pub mod config;
pub mod mcp;
pub mod provider;
pub mod speedtest;
pub use config::ConfigService;
pub use mcp::McpService;
pub use provider::ProviderService;
pub use speedtest::{EndpointLatency, SpeedtestService};

View File

@@ -0,0 +1,168 @@
use futures::future::join_all;
use reqwest::{Client, Url};
use serde::Serialize;
use std::time::{Duration, Instant};
use crate::error::AppError;
const DEFAULT_TIMEOUT_SECS: u64 = 8;
const MAX_TIMEOUT_SECS: u64 = 30;
const MIN_TIMEOUT_SECS: u64 = 2;
/// Result of a single endpoint latency test
#[derive(Debug, Clone, Serialize)]
pub struct EndpointLatency {
pub url: String,
pub latency: Option<u128>,
pub status: Option<u16>,
pub error: Option<String>,
}
/// Network speed-test business logic
pub struct SpeedtestService;
impl SpeedtestService {
/// Test the response latency of a set of endpoints.
pub async fn test_endpoints(
urls: Vec<String>,
timeout_secs: Option<u64>,
) -> Result<Vec<EndpointLatency>, AppError> {
if urls.is_empty() {
return Ok(vec![]);
}
let timeout = Self::sanitize_timeout(timeout_secs);
let client = Self::build_client(timeout)?;
let tasks = urls.into_iter().map(|raw_url| {
let client = client.clone();
async move {
let trimmed = raw_url.trim().to_string();
if trimmed.is_empty() {
return EndpointLatency {
url: raw_url,
latency: None,
status: None,
error: Some("URL 不能为空".to_string()),
};
}
let parsed_url = match Url::parse(&trimmed) {
Ok(url) => url,
Err(err) => {
return EndpointLatency {
url: trimmed,
latency: None,
status: None,
error: Some(format!("URL 无效: {err}")),
};
}
};
// Send a warm-up request first and ignore its result; it only serves to reuse the connection and avoid the first-request penalty.
let _ = client.get(parsed_url.clone()).send().await;
// Time the second request and report it as the result.
let start = Instant::now();
match client.get(parsed_url).send().await {
Ok(resp) => EndpointLatency {
url: trimmed,
latency: Some(start.elapsed().as_millis()),
status: Some(resp.status().as_u16()),
error: None,
},
Err(err) => {
let status = err.status().map(|s| s.as_u16());
let error_message = if err.is_timeout() {
"请求超时".to_string()
} else if err.is_connect() {
"连接失败".to_string()
} else {
err.to_string()
};
EndpointLatency {
url: trimmed,
latency: None,
status,
error: Some(error_message),
}
}
}
}
});
Ok(join_all(tasks).await)
}
fn build_client(timeout_secs: u64) -> Result<Client, AppError> {
Client::builder()
.timeout(Duration::from_secs(timeout_secs))
.redirect(reqwest::redirect::Policy::limited(5))
.user_agent("cc-switch-speedtest/1.0")
.build()
.map_err(|e| AppError::Message(format!("创建 HTTP 客户端失败: {e}")))
}
fn sanitize_timeout(timeout_secs: Option<u64>) -> u64 {
let secs = timeout_secs.unwrap_or(DEFAULT_TIMEOUT_SECS);
secs.clamp(MIN_TIMEOUT_SECS, MAX_TIMEOUT_SECS)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn sanitize_timeout_clamps_values() {
assert_eq!(
SpeedtestService::sanitize_timeout(Some(1)),
MIN_TIMEOUT_SECS
);
assert_eq!(
SpeedtestService::sanitize_timeout(Some(999)),
MAX_TIMEOUT_SECS
);
assert_eq!(
SpeedtestService::sanitize_timeout(Some(10)),
10u64.min(MAX_TIMEOUT_SECS).max(MIN_TIMEOUT_SECS)
);
assert_eq!(
SpeedtestService::sanitize_timeout(None),
DEFAULT_TIMEOUT_SECS
);
}
#[test]
fn test_endpoints_handles_empty_list() {
let result =
tauri::async_runtime::block_on(SpeedtestService::test_endpoints(Vec::new(), Some(5)))
.expect("empty list should succeed");
assert!(result.is_empty());
}
#[test]
fn test_endpoints_reports_invalid_url() {
let result = tauri::async_runtime::block_on(SpeedtestService::test_endpoints(
vec!["not a url".into(), "".into()],
None,
))
.expect("invalid inputs should still succeed");
assert_eq!(result.len(), 2);
assert!(
result[0]
.error
.as_deref()
.unwrap_or_default()
.starts_with("URL 无效"),
"invalid url should yield parse error"
);
assert_eq!(
result[1].error.as_deref(),
Some("URL 不能为空"),
"empty url should report validation error"
);
}
}

View File

@@ -1,108 +0,0 @@
use futures::future::join_all;
use reqwest::{Client, Url};
use serde::Serialize;
use std::time::{Duration, Instant};
use crate::error::AppError;
const DEFAULT_TIMEOUT_SECS: u64 = 8;
const MAX_TIMEOUT_SECS: u64 = 30;
const MIN_TIMEOUT_SECS: u64 = 2;
#[derive(Debug, Clone, Serialize)]
pub struct EndpointLatency {
pub url: String,
pub latency: Option<u128>,
pub status: Option<u16>,
pub error: Option<String>,
}
fn build_client(timeout_secs: u64) -> Result<Client, AppError> {
Client::builder()
.timeout(Duration::from_secs(timeout_secs))
.redirect(reqwest::redirect::Policy::limited(5))
.user_agent("cc-switch-speedtest/1.0")
.build()
.map_err(|e| AppError::Message(format!("创建 HTTP 客户端失败: {e}")))
}
fn sanitize_timeout(timeout_secs: Option<u64>) -> u64 {
let secs = timeout_secs.unwrap_or(DEFAULT_TIMEOUT_SECS);
secs.clamp(MIN_TIMEOUT_SECS, MAX_TIMEOUT_SECS)
}
pub async fn test_endpoints(
urls: Vec<String>,
timeout_secs: Option<u64>,
) -> Result<Vec<EndpointLatency>, AppError> {
if urls.is_empty() {
return Ok(vec![]);
}
let timeout = sanitize_timeout(timeout_secs);
let client = build_client(timeout)?;
let tasks = urls.into_iter().map(|raw_url| {
let client = client.clone();
async move {
let trimmed = raw_url.trim().to_string();
if trimmed.is_empty() {
return EndpointLatency {
url: raw_url,
latency: None,
status: None,
error: Some("URL 不能为空".to_string()),
};
}
let parsed_url = match Url::parse(&trimmed) {
Ok(url) => url,
Err(err) => {
return EndpointLatency {
url: trimmed,
latency: None,
status: None,
error: Some(format!("URL 无效: {err}")),
};
}
};
// Send a warm-up request first and ignore its result; it only serves to reuse the connection and avoid the first-request penalty
let _ = client.get(parsed_url.clone()).send().await;
// Time the second request and report it as the result
let start = Instant::now();
match client.get(parsed_url).send().await {
Ok(resp) => {
let latency = start.elapsed().as_millis();
EndpointLatency {
url: trimmed,
latency: Some(latency),
status: Some(resp.status().as_u16()),
error: None,
}
}
Err(err) => {
let status = err.status().map(|s| s.as_u16());
let error_message = if err.is_timeout() {
"请求超时".to_string()
} else if err.is_connect() {
"连接失败".to_string()
} else {
err.to_string()
};
EndpointLatency {
url: trimmed,
latency: None,
status,
error: Some(error_message),
}
}
}
}
});
let results = join_all(tasks).await;
Ok(results)
}

View File

@@ -3,8 +3,8 @@ use std::{fs, path::Path, sync::RwLock};
use tauri::async_runtime;
use cc_switch_lib::{
create_backup, get_claude_settings_path, import_config_from_path, read_json_file,
sync_current_providers_to_live, AppError, AppState, AppType, MultiAppConfig, Provider,
get_claude_settings_path, read_json_file, AppError, AppState, AppType, ConfigService,
MultiAppConfig, Provider,
};
#[path = "support.rs"]
@@ -41,7 +41,7 @@ fn sync_claude_provider_writes_live_settings() {
manager.providers.insert("prov-1".to_string(), provider);
manager.current = "prov-1".to_string();
sync_current_providers_to_live(&mut config).expect("sync live settings");
ConfigService::sync_current_providers_to_live(&mut config).expect("sync live settings");
let settings_path = get_claude_settings_path();
assert!(
@@ -110,7 +110,7 @@ fn sync_codex_provider_writes_auth_and_config() {
manager.providers.insert("codex-1".to_string(), provider);
manager.current = "codex-1".to_string();
sync_current_providers_to_live(&mut config).expect("sync codex live");
ConfigService::sync_current_providers_to_live(&mut config).expect("sync codex live");
let auth_path = cc_switch_lib::get_codex_auth_path();
let config_path = cc_switch_lib::get_codex_config_path();
@@ -266,7 +266,7 @@ fn sync_codex_provider_missing_auth_returns_error() {
manager.providers.insert(provider.id.clone(), provider);
manager.current = "codex-missing-auth".to_string();
let err = sync_current_providers_to_live(&mut config)
let err = ConfigService::sync_current_providers_to_live(&mut config)
.expect_err("sync should fail when auth missing");
match err {
cc_switch_lib::AppError::Config(msg) => {
@@ -595,7 +595,7 @@ fn create_backup_skips_missing_file() {
let config_path = home.join(".cc-switch").join("config.json");
// When the file has not been created, an empty string should be returned without an error
let result = create_backup(&config_path).expect("create backup");
let result = ConfigService::create_backup(&config_path).expect("create backup");
assert!(
result.is_empty(),
"expected empty backup id when config file missing"
@@ -612,7 +612,7 @@ fn create_backup_generates_snapshot_file() {
fs::create_dir_all(&config_dir).expect("prepare config dir");
fs::write(&config_path, r#"{"version":2}"#).expect("write config file");
let backup_id = create_backup(&config_path).expect("backup success");
let backup_id = ConfigService::create_backup(&config_path).expect("backup success");
assert!(
!backup_id.is_empty(),
"backup id should contain timestamp information"
@@ -651,7 +651,8 @@ fn create_backup_retains_only_latest_entries() {
std::thread::sleep(std::time::Duration::from_secs(1));
let latest_backup_id = create_backup(&config_path).expect("create backup with cleanup");
let latest_backup_id =
ConfigService::create_backup(&config_path).expect("create backup with cleanup");
assert!(
!latest_backup_id.is_empty(),
"backup id should not be empty when config exists"
@@ -731,8 +732,8 @@ fn import_config_from_path_overwrites_state_and_creates_backup() {
config: RwLock::new(MultiAppConfig::default()),
};
let backup_id =
import_config_from_path(&import_path, &app_state).expect("import should succeed");
let backup_id = ConfigService::import_config_from_path(&import_path, &app_state)
.expect("import should succeed");
assert!(
!backup_id.is_empty(),
"expected backup id when original config exists"
@@ -787,7 +788,8 @@ fn import_config_from_path_invalid_json_returns_error() {
config: RwLock::new(MultiAppConfig::default()),
};
let err = import_config_from_path(&invalid_path, &app_state).expect_err("import should fail");
let err = ConfigService::import_config_from_path(&invalid_path, &app_state)
.expect_err("import should fail");
match err {
AppError::Json { .. } => {}
other => panic!("expected json error, got {other:?}"),
@@ -805,7 +807,7 @@ fn import_config_from_path_missing_file_produces_io_error() {
config: RwLock::new(MultiAppConfig::default()),
};
let err = import_config_from_path(missing_path, &app_state)
let err = ConfigService::import_config_from_path(missing_path, &app_state)
.expect_err("import should fail for missing file");
match err {
AppError::Io { .. } => {}