Add Phase 5 enhancements: security, i18n, analysis, backup, notifications

- Database v8 migration: tags, pinned, avg_startup_ms columns
- Security scanning with CVE matching and batch scan
- Bundled library extraction and vulnerability reports
- Desktop notification system for security alerts
- Backup/restore system for AppImage configurations
- i18n framework with gettext support
- Runtime analysis and Wayland compatibility detection
- AppStream metadata and Flatpak-style build support
- File watcher module for live directory monitoring
- Preferences panel with GSettings integration
- CLI interface for headless operation
- Detail view: tabbed layout with ViewSwitcher in title bar,
  health score, sandbox controls, changelog links
- Library view: sort dropdown, context menu enhancements
- Dashboard: system status, disk usage, launch history
- Security report page with scan and export
- Packaging: meson build, PKGBUILD, metainfo
This commit is contained in:
lashman
2026-02-27 17:16:41 +02:00
parent a7ed3742fb
commit 423323d5a9
51 changed files with 10583 additions and 481 deletions

437
src/core/backup.rs Normal file
View File

@@ -0,0 +1,437 @@
use std::fs;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::process::Command;
use super::database::Database;
use super::footprint;
/// Manifest describing the contents of a config backup archive.
///
/// Serialized as `manifest.json` at the root of every backup tarball, so an
/// archive can be inspected or restored without consulting the database.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct BackupManifest {
    // Display name of the application this backup belongs to.
    pub app_name: String,
    // App version at backup time ("unknown" when not recorded).
    pub app_version: String,
    // Human-readable UTC timestamp, formatted "%Y-%m-%d %H:%M:%S UTC".
    pub created_at: String,
    // One entry per backed-up config/data path.
    pub paths: Vec<BackupPathEntry>,
    // Sum of the on-disk sizes of all backed-up paths, in bytes.
    pub total_size: u64,
}
/// A single config/data path captured in a backup manifest.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct BackupPathEntry {
    // Absolute path the data was copied from (and is restored to).
    pub original_path: String,
    // Category of the path, e.g. "config" or "data" ("cache" is never archived).
    pub path_type: String,
    // Flattened, slash-free name recorded for reference in the manifest.
    pub relative_path: String,
    // Recursive size of the path at backup time, in bytes.
    pub size_bytes: u64,
}
/// Directory where backup archives are stored, created on first use.
fn backups_dir() -> PathBuf {
    let base = match dirs::data_dir() {
        Some(d) => d,
        None => PathBuf::from("~/.local/share"),
    };
    let dir = base.join("driftwood").join("backups");
    // Best effort: a failure here will surface later when the archive is written.
    let _ = fs::create_dir_all(&dir);
    dir
}
/// Create a backup of an AppImage's config/data files.
///
/// Discovers the app's data paths if they have not been recorded yet, then
/// archives every existing, non-cache path together with a `manifest.json`
/// into a gzipped tarball under [`backups_dir`], records the backup in the
/// database, and returns the path to the created archive.
///
/// # Errors
/// - [`BackupError::NotFound`] if `appimage_id` has no database record.
/// - [`BackupError::NoPaths`] if no existing, non-cache paths were found.
/// - [`BackupError::Io`] when manifest writing or tar invocation fails.
/// - [`BackupError::Database`] when the initial record lookup fails.
pub fn create_backup(db: &Database, appimage_id: i64) -> Result<PathBuf, BackupError> {
    let record = db.get_appimage_by_id(appimage_id)
        .map_err(|e| BackupError::Database(e.to_string()))?
        .ok_or(BackupError::NotFound)?;
    let app_name = record.app_name.as_deref().unwrap_or(&record.filename);
    let app_version = record.app_version.as_deref().unwrap_or("unknown");
    // Discover data paths if not already done.
    let existing_paths = db.get_app_data_paths(appimage_id).unwrap_or_default();
    if existing_paths.is_empty() {
        footprint::discover_and_store(db, appimage_id, &record);
    }
    let data_paths = db.get_app_data_paths(appimage_id).unwrap_or_default();
    if data_paths.is_empty() {
        return Err(BackupError::NoPaths);
    }
    // Collect files to back up (config and data paths that exist).
    let mut entries = Vec::new();
    let mut total_size: u64 = 0;
    for dp in &data_paths {
        let path = Path::new(&dp.path);
        if !path.exists() {
            continue;
        }
        // Skip cache paths by default (too large, easily regenerated).
        if dp.path_type == "cache" {
            continue;
        }
        let size = dir_size(path);
        total_size += size;
        // Flattened, slash-free name kept in the manifest for reference.
        let relative = dp.path.replace('/', "_").trim_start_matches('_').to_string();
        entries.push(BackupPathEntry {
            original_path: dp.path.clone(),
            path_type: dp.path_type.clone(),
            relative_path: relative,
            size_bytes: size,
        });
    }
    if entries.is_empty() {
        return Err(BackupError::NoPaths);
    }
    // Create manifest.
    let timestamp = chrono::Utc::now().format("%Y%m%d-%H%M%S").to_string();
    let manifest = BackupManifest {
        app_name: app_name.to_string(),
        app_version: app_version.to_string(),
        created_at: chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(),
        paths: entries.clone(),
        total_size,
    };
    // Create backup archive using tar.
    let app_id = sanitize_filename(app_name);
    let archive_name = format!("{}-{}-{}.tar.gz", app_id, app_version, timestamp);
    let archive_path = backups_dir().join(&archive_name);
    // Write manifest to a temp file so tar can pick it up by name.
    let temp_dir = tempfile::tempdir().map_err(|e| BackupError::Io(e.to_string()))?;
    let manifest_path = temp_dir.path().join("manifest.json");
    let manifest_json = serde_json::to_string_pretty(&manifest)
        .map_err(|e| BackupError::Io(e.to_string()))?;
    fs::write(&manifest_path, &manifest_json)
        .map_err(|e| BackupError::Io(e.to_string()))?;
    // Build tar command: manifest first, then each source path added by its
    // base name (via a -C into its parent directory).
    // NOTE(review): two paths sharing one base name would collide inside the
    // archive and restore_backup would restore only one — confirm discovered
    // paths always have distinct file names.
    let mut tar_args = vec![
        "czf".to_string(),
        archive_path.to_string_lossy().to_string(),
        "-C".to_string(),
        temp_dir.path().to_string_lossy().to_string(),
        "manifest.json".to_string(),
    ];
    for entry in &entries {
        let source = Path::new(&entry.original_path);
        if source.exists() {
            tar_args.push("-C".to_string());
            tar_args.push(
                source.parent().unwrap_or(Path::new("/")).to_string_lossy().to_string(),
            );
            tar_args.push(
                source.file_name().unwrap_or_default().to_string_lossy().to_string(),
            );
        }
    }
    // BUGFIX: stderr was previously Stdio::piped() but never read — a chatty
    // tar could fill the pipe buffer and deadlock the child. Discard it.
    let status = Command::new("tar")
        .args(&tar_args)
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .map_err(|e| BackupError::Io(format!("tar failed: {}", e)))?;
    if !status.success() {
        return Err(BackupError::Io("tar archive creation failed".to_string()));
    }
    // Get archive size (0 if metadata is unreadable; non-fatal).
    let archive_size = fs::metadata(&archive_path)
        .map(|m| m.len() as i64)
        .unwrap_or(0);
    // Compute checksum so later integrity checks are possible.
    let checksum = compute_file_sha256(&archive_path);
    // Record in database; the archive itself is the source of truth, so a
    // bookkeeping failure here is deliberately ignored.
    db.insert_config_backup(
        appimage_id,
        Some(app_version),
        &archive_path.to_string_lossy(),
        archive_size,
        checksum.as_deref(),
        entries.len() as i32,
    ).ok();
    Ok(archive_path)
}
/// Restore a backup from an archive.
///
/// Reads the embedded manifest, extracts the whole archive into a temporary
/// directory, then copies each listed path back to its original absolute
/// location, overwriting existing files. Entries missing from the archive
/// are counted as skipped rather than failing the restore.
///
/// # Errors
/// Returns [`BackupError::NotFound`] if the archive file does not exist, and
/// [`BackupError::Io`] when tar extraction or any file copy fails.
///
/// NOTE(review): target paths are taken from the archive's own manifest, so
/// a tampered archive could direct writes to arbitrary absolute paths —
/// confirm archives only ever come from `create_backup`.
pub fn restore_backup(archive_path: &Path) -> Result<RestoreResult, BackupError> {
    if !archive_path.exists() {
        return Err(BackupError::NotFound);
    }
    // Extract manifest first (streams only manifest.json, cheap).
    let manifest = read_manifest(archive_path)?;
    // Extract all files into a throwaway staging directory.
    let temp_dir = tempfile::tempdir().map_err(|e| BackupError::Io(e.to_string()))?;
    let status = Command::new("tar")
        .args(["xzf", &archive_path.to_string_lossy(), "-C", &temp_dir.path().to_string_lossy()])
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .map_err(|e| BackupError::Io(format!("tar extract failed: {}", e)))?;
    if !status.success() {
        return Err(BackupError::Io("tar extraction failed".to_string()));
    }
    // Restore each path. The archive stored every path under its base name
    // (see create_backup), so look it up the same way here.
    let mut restored = 0u32;
    let mut skipped = 0u32;
    for entry in &manifest.paths {
        let source_name = Path::new(&entry.original_path)
            .file_name()
            .unwrap_or_default();
        let extracted = temp_dir.path().join(source_name);
        let target = Path::new(&entry.original_path);
        if !extracted.exists() {
            // Listed in the manifest but absent from the archive.
            skipped += 1;
            continue;
        }
        // Create parent directory (best effort; the copy below reports errors).
        if let Some(parent) = target.parent() {
            fs::create_dir_all(parent).ok();
        }
        // Copy files back, recursing for directories.
        if extracted.is_dir() {
            copy_dir_recursive(&extracted, target)
                .map_err(|e| BackupError::Io(e.to_string()))?;
        } else {
            fs::copy(&extracted, target)
                .map_err(|e| BackupError::Io(e.to_string()))?;
        }
        restored += 1;
    }
    Ok(RestoreResult {
        manifest,
        paths_restored: restored,
        paths_skipped: skipped,
    })
}
/// List available backups, optionally restricted to a single AppImage.
///
/// Passing `None` lists every recorded backup. Each entry reports whether
/// its archive file still exists on disk.
pub fn list_backups(db: &Database, appimage_id: Option<i64>) -> Vec<BackupInfo> {
    let records = match appimage_id {
        Some(id) => db.get_config_backups(id).unwrap_or_default(),
        None => db.get_all_config_backups().unwrap_or_default(),
    };
    let mut infos = Vec::with_capacity(records.len());
    for rec in &records {
        infos.push(BackupInfo {
            id: rec.id,
            appimage_id: rec.appimage_id,
            app_version: rec.app_version.clone(),
            archive_path: rec.archive_path.clone(),
            archive_size: rec.archive_size.unwrap_or(0),
            created_at: rec.created_at.clone(),
            path_count: rec.path_count.unwrap_or(0),
            exists: Path::new(&rec.archive_path).exists(),
        });
    }
    infos
}
/// Delete a backup archive and its database record.
///
/// Missing archive files are tolerated: the database row is removed either
/// way. Returns `BackupError::NotFound` for an unknown `backup_id`.
pub fn delete_backup(db: &Database, backup_id: i64) -> Result<(), BackupError> {
    // Look the backup up among all recorded backups.
    let all = db.get_all_config_backups().unwrap_or_default();
    let Some(backup) = all.iter().find(|b| b.id == backup_id) else {
        return Err(BackupError::NotFound);
    };
    // Remove the archive file if it is still present.
    let archive = Path::new(&backup.archive_path);
    if archive.exists() {
        fs::remove_file(archive).map_err(|e| BackupError::Io(e.to_string()))?;
    }
    // Drop the bookkeeping row.
    db.delete_config_backup(backup_id)
        .map_err(|e| BackupError::Database(e.to_string()))?;
    Ok(())
}
/// Remove backups older than the specified number of days.
///
/// Returns the number of backups successfully deleted; individual deletion
/// failures are skipped rather than aborting the sweep.
pub fn auto_cleanup_old_backups(db: &Database, retention_days: u32) -> Result<u32, BackupError> {
    let cutoff = chrono::Utc::now() - chrono::Duration::days(i64::from(retention_days));
    let cutoff_str = cutoff.format("%Y-%m-%d %H:%M:%S").to_string();
    // Lexicographic comparison is valid for zero-padded "YYYY-MM-DD HH:MM:SS"
    // strings — presumably the DB stores created_at in that format; TODO
    // confirm against the schema.
    let mut removed = 0u32;
    for backup in db.get_all_config_backups().unwrap_or_default() {
        if backup.created_at < cutoff_str && delete_backup(db, backup.id).is_ok() {
            removed += 1;
        }
    }
    Ok(removed)
}
// --- Helper types ---
/// Summary of one recorded backup, as returned by [`list_backups`].
#[derive(Debug)]
pub struct BackupInfo {
    // Database row id of the backup record.
    pub id: i64,
    // Id of the AppImage the backup belongs to.
    pub appimage_id: i64,
    // App version captured at backup time, if recorded.
    pub app_version: Option<String>,
    // Absolute path of the .tar.gz archive.
    pub archive_path: String,
    // Archive size in bytes (0 when unknown).
    pub archive_size: i64,
    // Creation timestamp string as stored in the database.
    pub created_at: String,
    // Number of config/data paths contained in the archive (0 when unknown).
    pub path_count: i32,
    // Whether the archive file still exists on disk.
    pub exists: bool,
}
/// Outcome of [`restore_backup`].
#[derive(Debug)]
pub struct RestoreResult {
    // Manifest read from the restored archive.
    pub manifest: BackupManifest,
    // Paths copied back to their original locations.
    pub paths_restored: u32,
    // Manifest entries that were missing from the archive.
    pub paths_skipped: u32,
}
/// Errors produced by the backup/restore subsystem.
#[derive(Debug)]
pub enum BackupError {
    /// The requested AppImage, backup record, or archive file was not found.
    NotFound,
    /// There were no existing config/data paths worth backing up.
    NoPaths,
    /// A filesystem or `tar` subprocess failure, with a description.
    Io(String),
    /// A database lookup/update failure, with a description.
    Database(String),
}
impl std::fmt::Display for BackupError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::NotFound => write!(f, "Backup not found"),
            Self::NoPaths => write!(f, "No config/data paths to back up"),
            Self::Io(e) => write!(f, "I/O error: {}", e),
            Self::Database(e) => write!(f, "Database error: {}", e),
        }
    }
}
// Added so BackupError works with `?` conversion into Box<dyn Error> and
// error-reporting crates; Debug + Display are already provided above.
impl std::error::Error for BackupError {}
// --- Utility functions ---
/// Lowercase `name` (ASCII only) and replace every character that is not
/// alphanumeric, '-' or '_' with '-', then strip leading/trailing dashes.
/// Used to build safe archive file names.
fn sanitize_filename(name: &str) -> String {
    let mut cleaned = String::with_capacity(name.len());
    for ch in name.chars() {
        if ch.is_alphanumeric() || ch == '-' || ch == '_' {
            cleaned.push(ch.to_ascii_lowercase());
        } else {
            cleaned.push('-');
        }
    }
    cleaned.trim_matches('-').to_string()
}
/// Recursively compute the total size in bytes of a file or directory tree.
///
/// BUGFIX: the previous version used `Path::is_dir()`, which follows
/// symlinks — a symlink cycle caused unbounded recursion and symlinked
/// trees were double-counted. `DirEntry::file_type()` / `DirEntry::metadata()`
/// do not traverse symlinks, so a symlink now contributes only its own
/// entry size. Unreadable entries count as 0 rather than failing the walk.
fn dir_size(path: &Path) -> u64 {
    if path.is_file() {
        return fs::metadata(path).map(|m| m.len()).unwrap_or(0);
    }
    let mut total = 0u64;
    if let Ok(entries) = fs::read_dir(path) {
        for entry in entries.flatten() {
            // Non-following check: symlinked directories are NOT recursed into.
            let is_dir = entry.file_type().map(|t| t.is_dir()).unwrap_or(false);
            if is_dir {
                total += dir_size(&entry.path());
            } else {
                // DirEntry::metadata() also does not traverse symlinks.
                total += entry.metadata().map(|m| m.len()).unwrap_or(0);
            }
        }
    }
    total
}
/// Compute the lowercase-hex SHA-256 digest of a file, streaming it in 8 KiB
/// chunks. Returns `None` on any I/O error (missing file, read failure).
fn compute_file_sha256(path: &Path) -> Option<String> {
    use sha2::{Digest, Sha256};
    let mut hasher = Sha256::new();
    let mut file = fs::File::open(path).ok()?;
    let mut chunk = [0u8; 8192];
    loop {
        match file.read(&mut chunk).ok()? {
            0 => break,
            n => hasher.update(&chunk[..n]),
        }
    }
    Some(format!("{:x}", hasher.finalize()))
}
/// Recursively copy the contents of `src` into `dst`, creating `dst` (and
/// any missing intermediate directories) as needed. Existing files in `dst`
/// with matching names are overwritten.
fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> {
    fs::create_dir_all(dst)?;
    for item in fs::read_dir(src)? {
        let item = item?;
        let from = item.path();
        let to = dst.join(item.file_name());
        if from.is_dir() {
            copy_dir_recursive(&from, &to)?;
        } else {
            fs::copy(&from, &to)?;
        }
    }
    Ok(())
}
/// Extract and parse `manifest.json` from a backup archive without unpacking
/// the rest of the tarball (tar's `-O` streams the member to stdout).
fn read_manifest(archive_path: &Path) -> Result<BackupManifest, BackupError> {
    let archive = archive_path.to_string_lossy();
    let output = Command::new("tar")
        .args(["xzf", &archive, "-O", "manifest.json"])
        .output()
        .map_err(|e| BackupError::Io(format!("tar extract manifest failed: {}", e)))?;
    if output.status.success() {
        serde_json::from_slice(&output.stdout)
            .map_err(|e| BackupError::Io(format!("Invalid manifest: {}", e)))
    } else {
        Err(BackupError::Io("Could not read manifest from archive".to_string()))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Filename sanitization: lowercasing, space replacement, edge trimming.
    #[test]
    fn test_sanitize_filename() {
        assert_eq!(sanitize_filename("Firefox"), "firefox");
        assert_eq!(sanitize_filename("My Cool App"), "my-cool-app");
        assert_eq!(sanitize_filename("  Spaces  "), "spaces");
    }
    // The storage directory must live under .../driftwood/backups.
    #[test]
    fn test_backups_dir_path() {
        let dir = backups_dir();
        assert!(dir.to_string_lossy().contains("driftwood"));
        assert!(dir.to_string_lossy().contains("backups"));
    }
    // Display strings are user-facing; pin the exact wording.
    #[test]
    fn test_backup_error_display() {
        assert_eq!(format!("{}", BackupError::NotFound), "Backup not found");
        assert_eq!(format!("{}", BackupError::NoPaths), "No config/data paths to back up");
    }
    // An empty directory has size zero.
    #[test]
    fn test_dir_size_empty() {
        let dir = tempfile::tempdir().unwrap();
        assert_eq!(dir_size(dir.path()), 0);
    }
    // A directory containing a file reports a positive size.
    #[test]
    fn test_dir_size_with_files() {
        let dir = tempfile::tempdir().unwrap();
        let file = dir.path().join("test.txt");
        fs::write(&file, "hello world").unwrap();
        let size = dir_size(dir.path());
        assert!(size > 0);
    }
}