Files
driftwood/src/core/database.rs
lashman 7e55d5796f Add WCAG 2.2 AAA compliance and automated AT-SPI audit tool
- Bring all UI widgets to WCAG 2.2 AAA conformance across all views
- Add accessible labels, roles, descriptions, and announcements
- Bump focus outlines to 3px, target sizes to 44px AAA minimum
- Fix announce()/announce_result() to walk widget tree via parent()
- Add AT-SPI accessibility audit script (tools/a11y-audit.py) that
  checks SC 4.1.2, 1.1.1, 1.3.1, 2.1.1, 2.5.5, 2.5.8, 2.4.8,
  2.4.9, 2.4.10, 2.1.3 with JSON report output for CI
- Clean up project structure, archive old plan documents
2026-03-01 12:44:21 +02:00

3485 lines
122 KiB
Rust

use rusqlite::{params, Connection, Result as SqlResult};
use std::path::PathBuf;
use super::catalog;
/// Thin wrapper owning the application's SQLite connection; all persistence
/// goes through methods on this type.
pub struct Database {
conn: Connection,
}
/// One row of the `appimages` table: a discovered AppImage file plus all
/// metadata accumulated for it across the schema's feature phases.
#[derive(Debug, Clone)]
pub struct AppImageRecord {
pub id: i64,
// Absolute file path (UNIQUE in the schema) and its basename.
pub path: String,
pub filename: String,
pub app_name: Option<String>,
pub app_version: Option<String>,
// AppImage format type — presumably 1 or 2; confirm against the scanner.
pub appimage_type: Option<i32>,
pub size_bytes: i64,
pub sha256: Option<String>,
pub icon_path: Option<String>,
pub desktop_file: Option<String>,
// Desktop-integration state: whether an entry was installed, and when.
pub integrated: bool,
pub integrated_at: Option<String>,
pub is_executable: bool,
pub desktop_entry_content: Option<String>,
pub categories: Option<String>,
pub description: Option<String>,
pub developer: Option<String>,
pub architecture: Option<String>,
// Timestamps are stored as SQLite datetime('now') text (see init_schema).
pub first_seen: String,
pub last_scanned: String,
pub file_modified: Option<String>,
// Phase 2 fields
pub fuse_status: Option<String>,
pub wayland_status: Option<String>,
pub update_info: Option<String>,
pub update_type: Option<String>,
pub latest_version: Option<String>,
pub update_checked: Option<String>,
pub update_url: Option<String>,
pub notes: Option<String>,
// Phase 3 fields
pub sandbox_mode: Option<String>,
// Phase 5 fields
pub runtime_wayland_status: Option<String>,
pub runtime_wayland_checked: Option<String>,
// Async analysis pipeline (schema default is 'complete'; v10 resets it to
// NULL to force re-analysis)
pub analysis_status: Option<String>,
// Custom launch arguments
pub launch_args: Option<String>,
// Phase 6 fields
pub tags: Option<String>,
pub pinned: bool,
pub avg_startup_ms: Option<i64>,
// Phase 9 fields - comprehensive metadata
pub appstream_id: Option<String>,
pub appstream_description: Option<String>,
pub generic_name: Option<String>,
pub license: Option<String>,
pub homepage_url: Option<String>,
pub bugtracker_url: Option<String>,
pub donation_url: Option<String>,
pub help_url: Option<String>,
pub vcs_url: Option<String>,
pub keywords: Option<String>,
pub mime_types: Option<String>,
pub content_rating: Option<String>,
pub project_group: Option<String>,
pub release_history: Option<String>,
pub desktop_actions: Option<String>,
pub has_signature: bool,
pub screenshot_urls: Option<String>,
// Phase 11 fields - system modification tracking
pub previous_version_path: Option<String>,
pub source_url: Option<String>,
pub autostart: bool,
pub startup_wm_class: Option<String>,
pub verification_status: Option<String>,
pub first_run_prompted: bool,
pub system_wide: bool,
pub is_portable: bool,
pub mount_point: Option<String>,
}
/// A tracked change this app made to the system (row of
/// `system_modifications`, added in schema v11), recorded so it can be
/// audited or undone later.
#[derive(Debug, Clone)]
pub struct SystemModification {
pub id: i64,
pub mod_type: String,
pub file_path: String,
// Content before the modification, if captured; used for restoration.
pub previous_value: Option<String>,
}
/// Sort orders available for the catalog view. Each variant maps directly to
/// an SQL `ORDER BY` clause via [`CatalogSortOrder::sql_clause`].
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum CatalogSortOrder {
    NameAsc,
    NameDesc,
    PopularityDesc,
    PopularityAsc,
    ReleaseDateDesc,
    ReleaseDateAsc,
}

impl CatalogSortOrder {
    /// Popularity combines OCS downloads, GitHub stars, and GitHub downloads
    /// into a single comparable score.
    ///
    /// The ascending variants push rows with no data to the end via a
    /// leading `CASE WHEN ... IS NULL` key; name is always the tiebreaker.
    pub fn sql_clause(&self) -> &'static str {
        match *self {
            CatalogSortOrder::NameAsc => "ORDER BY name COLLATE NOCASE ASC",
            CatalogSortOrder::NameDesc => "ORDER BY name COLLATE NOCASE DESC",
            CatalogSortOrder::PopularityDesc => "ORDER BY (COALESCE(ocs_downloads, 0) + COALESCE(github_stars, 0) + COALESCE(github_downloads, 0)) DESC, name COLLATE NOCASE ASC",
            CatalogSortOrder::PopularityAsc => "ORDER BY CASE WHEN ocs_downloads IS NULL AND github_stars IS NULL AND github_downloads IS NULL THEN 1 ELSE 0 END, (COALESCE(ocs_downloads, 0) + COALESCE(github_stars, 0) + COALESCE(github_downloads, 0)) ASC, name COLLATE NOCASE ASC",
            CatalogSortOrder::ReleaseDateDesc => "ORDER BY COALESCE(release_date, '0000') DESC, name COLLATE NOCASE ASC",
            CatalogSortOrder::ReleaseDateAsc => "ORDER BY CASE WHEN release_date IS NULL THEN 1 ELSE 0 END, release_date ASC, name COLLATE NOCASE ASC",
        }
    }
}
/// A catalog entry (row of `catalog_apps`) with optional GitHub and OCS
/// (appimagehub.com) enrichment columns added by later migrations.
#[derive(Debug, Clone)]
pub struct CatalogApp {
pub id: i64,
pub name: String,
pub description: Option<String>,
// ';'-separated category list (normalized by migrations v20-v24).
pub categories: Option<String>,
pub download_url: String,
pub icon_url: Option<String>,
pub homepage: Option<String>,
pub license: Option<String>,
pub screenshots: Option<String>,
// GitHub enrichment (schema v14-v16); present only for GitHub-hosted apps.
pub github_owner: Option<String>,
pub github_repo: Option<String>,
pub github_stars: Option<i64>,
pub github_downloads: Option<i64>,
pub latest_version: Option<String>,
pub release_date: Option<String>,
pub github_enriched_at: Option<String>,
pub github_download_url: Option<String>,
pub github_release_assets: Option<String>,
pub github_description: Option<String>,
pub github_readme: Option<String>,
// OCS (appimagehub.com) metadata
pub ocs_id: Option<i64>,
pub ocs_downloads: Option<i64>,
pub ocs_score: Option<i64>,
pub ocs_typename: Option<String>,
pub ocs_personid: Option<String>,
pub ocs_description: Option<String>,
pub ocs_summary: Option<String>,
pub ocs_version: Option<String>,
pub ocs_tags: Option<String>,
pub ocs_changed: Option<String>,
pub ocs_preview_url: Option<String>,
pub ocs_detailpage: Option<String>,
pub ocs_created: Option<String>,
pub ocs_downloadname: Option<String>,
pub ocs_downloadsize: Option<i64>,
pub ocs_arch: Option<String>,
pub ocs_md5sum: Option<String>,
pub ocs_comments: Option<i64>,
pub release_history: Option<String>,
}
/// A configured catalog feed (row of `catalog_sources`); `url` is UNIQUE in
/// the schema.
#[derive(Debug, Clone)]
pub struct CatalogSourceRecord {
pub id: i64,
pub name: String,
pub url: String,
pub source_type: String,
pub enabled: bool,
pub last_synced: Option<String>,
// Cached number of apps provided by this source.
pub app_count: i32,
}
/// A catalog app as parsed from a source feed. Has no `id`/`source_id`, so
/// presumably this is the insert payload for `catalog_apps` — confirm
/// against the sync code.
#[derive(Debug, Clone)]
pub struct CatalogAppRecord {
pub name: String,
pub description: Option<String>,
pub categories: Option<String>,
pub latest_version: Option<String>,
pub download_url: String,
pub icon_url: Option<String>,
pub homepage: Option<String>,
pub file_size: Option<i64>,
pub architecture: Option<String>,
}
/// A desktop entry detected as orphaned (row of `orphaned_entries`):
/// `cleaned` records whether it has already been removed.
#[derive(Debug, Clone)]
pub struct OrphanedEntry {
pub id: i64,
pub desktop_file: String,
// The AppImage path the entry pointed at, if it could be recovered.
pub original_path: Option<String>,
pub app_name: Option<String>,
pub detected_at: String,
pub cleaned: bool,
}
/// One launch of an AppImage (row of `launch_events`); `source` defaults to
/// 'desktop_entry' in the schema.
#[derive(Debug, Clone)]
pub struct LaunchEvent {
pub id: i64,
pub appimage_id: i64,
pub launched_at: String,
pub source: String,
}
/// A shared library found bundled inside an AppImage (row of
/// `bundled_libraries`, schema v3), used for CVE matching.
#[derive(Debug, Clone)]
pub struct BundledLibraryRecord {
pub id: i64,
pub appimage_id: i64,
pub soname: String,
// Name/version as inferred by the scanner; may be absent if undetectable.
pub detected_name: Option<String>,
pub detected_version: Option<String>,
pub file_path: Option<String>,
pub file_size: i64,
}
/// A CVE matched against a bundled library (row of `cve_matches`).
#[derive(Debug, Clone)]
pub struct CveMatchRecord {
pub id: i64,
pub appimage_id: i64,
pub library_id: i64,
pub cve_id: String,
pub severity: Option<String>,
pub cvss_score: Option<f64>,
pub summary: Option<String>,
pub affected_versions: Option<String>,
pub fixed_version: Option<String>,
// The library_* fields are not columns of cve_matches; presumably they are
// populated from a JOIN with bundled_libraries — confirm against the query.
pub library_soname: String,
pub library_name: Option<String>,
pub library_version: Option<String>,
}
/// Per-app CVE counts bucketed by severity; `Default` is the all-zero
/// ("no findings") summary.
#[derive(Debug, Clone, Default)]
pub struct CveSummary {
    pub critical: i64,
    pub high: i64,
    pub medium: i64,
    pub low: i64,
}

impl CveSummary {
    /// Total number of CVE matches across every severity bucket.
    pub fn total(&self) -> i64 {
        self.low + self.medium + self.high + self.critical
    }

    /// Label of the worst non-empty bucket, or "NONE" when all are zero.
    pub fn max_severity(&self) -> &'static str {
        let buckets = [
            (self.critical, "CRITICAL"),
            (self.high, "HIGH"),
            (self.medium, "MEDIUM"),
            (self.low, "LOW"),
        ];
        for &(count, label) in buckets.iter() {
            if count > 0 {
                return label;
            }
        }
        "NONE"
    }

    /// UI badge CSS class corresponding to the worst severity present.
    pub fn badge_class(&self) -> &'static str {
        match self.max_severity() {
            "CRITICAL" | "HIGH" => "error",
            "MEDIUM" => "warning",
            "LOW" => "neutral",
            _ => "success",
        }
    }
}
/// A filesystem path believed to hold an app's data (row of
/// `app_data_paths`, schema v3); used by config backup/cleanup.
#[derive(Debug, Clone)]
pub struct AppDataPathRecord {
pub id: i64,
pub appimage_id: i64,
pub path: String,
pub path_type: String,
// How the path was found and how sure we are (schema defaults:
// 'heuristic' / 'low').
pub discovery_method: String,
pub confidence: String,
pub size_bytes: i64,
}
/// One recorded update attempt for an AppImage (row of `update_history`,
/// schema v2).
#[derive(Debug, Clone)]
pub struct UpdateHistoryEntry {
pub id: i64,
pub appimage_id: i64,
pub from_version: Option<String>,
pub to_version: Option<String>,
pub update_method: Option<String>,
pub download_size: Option<i64>,
pub updated_at: String,
// Whether the attempt completed successfully.
pub success: bool,
}
/// A configuration backup archive for an app (row of `config_backups`,
/// schema v5), with restore bookkeeping.
#[derive(Debug, Clone)]
pub struct ConfigBackupRecord {
pub id: i64,
pub appimage_id: i64,
pub app_version: Option<String>,
pub archive_path: String,
pub archive_size: Option<i64>,
pub checksum: Option<String>,
pub created_at: String,
// Number of paths captured in the archive, if recorded.
pub path_count: Option<i32>,
pub restored_count: i32,
pub last_restored_at: Option<String>,
}
/// A sandbox profile (row of `sandbox_profiles`, schema v6); `content` is the
/// profile text itself, `source` where it came from.
#[derive(Debug, Clone)]
pub struct SandboxProfileRecord {
pub id: i64,
pub app_name: String,
pub profile_version: Option<String>,
pub author: Option<String>,
pub description: Option<String>,
pub content: String,
pub source: String,
// Identifier in an external profile registry, if imported from one.
pub registry_id: Option<String>,
pub created_at: Option<String>,
}
/// Compute the on-disk location of the SQLite database
/// (`<data_dir>/driftwood/driftwood.db`), creating the directory on the way.
fn db_path() -> PathBuf {
let data_dir = crate::config::data_dir_fallback()
.join("driftwood");
// Best-effort: if creation fails, Connection::open will surface the error.
std::fs::create_dir_all(&data_dir).ok();
data_dir.join("driftwood.db")
}
impl Database {
/// Return the path to the database file.
///
/// The `Option` return is kept for API compatibility; the current
/// implementation always resolves a path via the module-level `db_path()`
/// and therefore always returns `Some`.
pub fn db_path() -> Option<PathBuf> {
Some(db_path())
}
pub fn open() -> SqlResult<Self> {
let path = db_path();
let conn = Connection::open(&path)?;
let db = Self { conn };
db.init_schema()?;
Ok(db)
}
/// Open (or create) the database at an explicit path, creating parent
/// directories best-effort, and apply all schema migrations.
pub fn open_at(path: &std::path::Path) -> SqlResult<Self> {
// A path with no parent falls back to "/" so create_dir_all is a no-op;
// any creation error is ignored and surfaces via Connection::open instead.
std::fs::create_dir_all(path.parent().unwrap_or(std::path::Path::new("/"))).ok();
let conn = Connection::open(path)?;
let db = Self { conn };
db.init_schema()?;
Ok(db)
}
pub fn open_in_memory() -> SqlResult<Self> {
let conn = Connection::open_in_memory()?;
let db = Self { conn };
db.init_schema()?;
Ok(db)
}
fn init_schema(&self) -> SqlResult<()> {
// Phase 1 base tables
self.conn.execute_batch(
"CREATE TABLE IF NOT EXISTS schema_version (
version INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS appimages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
path TEXT NOT NULL UNIQUE,
filename TEXT NOT NULL,
app_name TEXT,
app_version TEXT,
appimage_type INTEGER,
size_bytes INTEGER NOT NULL DEFAULT 0,
sha256 TEXT,
icon_path TEXT,
desktop_file TEXT,
integrated INTEGER NOT NULL DEFAULT 0,
integrated_at TEXT,
is_executable INTEGER NOT NULL DEFAULT 0,
desktop_entry_content TEXT,
categories TEXT,
description TEXT,
developer TEXT,
architecture TEXT,
first_seen TEXT NOT NULL DEFAULT (datetime('now')),
last_scanned TEXT NOT NULL DEFAULT (datetime('now')),
file_modified TEXT
);
CREATE TABLE IF NOT EXISTS orphaned_entries (
id INTEGER PRIMARY KEY AUTOINCREMENT,
desktop_file TEXT NOT NULL,
original_path TEXT,
app_name TEXT,
detected_at TEXT NOT NULL DEFAULT (datetime('now')),
cleaned INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS scan_log (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scan_type TEXT NOT NULL,
directories TEXT,
found INTEGER NOT NULL DEFAULT 0,
new_count INTEGER NOT NULL DEFAULT 0,
removed INTEGER NOT NULL DEFAULT 0,
duration_ms INTEGER NOT NULL DEFAULT 0,
scanned_at TEXT NOT NULL DEFAULT (datetime('now'))
);"
)?;
// Check current schema version and migrate
let count: i32 = self.conn.query_row(
"SELECT COUNT(*) FROM schema_version",
[],
|row| row.get(0),
)?;
let current_version = if count == 0 {
self.conn.execute(
"INSERT INTO schema_version (version) VALUES (?1)",
params![1],
)?;
1
} else {
self.conn.query_row(
"SELECT version FROM schema_version LIMIT 1",
[],
|row| row.get::<_, i32>(0),
)?
};
if current_version < 2 {
self.migrate_to_v2()?;
}
if current_version < 3 {
self.migrate_to_v3()?;
}
if current_version < 4 {
self.migrate_to_v4()?;
}
if current_version < 5 {
self.migrate_to_v5()?;
}
if current_version < 6 {
self.migrate_to_v6()?;
}
if current_version < 7 {
self.migrate_to_v7()?;
}
if current_version < 8 {
self.migrate_to_v8()?;
}
if current_version < 9 {
self.migrate_to_v9()?;
}
if current_version < 10 {
self.migrate_to_v10()?;
}
if current_version < 11 {
self.migrate_to_v11()?;
}
if current_version < 12 {
self.migrate_to_v12()?;
}
if current_version < 13 {
self.migrate_to_v13()?;
}
if current_version < 14 {
self.migrate_to_v14()?;
}
if current_version < 15 {
self.migrate_to_v15()?;
}
if current_version < 16 {
self.migrate_to_v16()?;
}
if current_version < 17 {
self.migrate_to_v17()?;
}
if current_version < 18 {
self.migrate_to_v18()?;
}
if current_version < 19 {
self.migrate_to_v19()?;
}
if current_version < 20 {
self.migrate_to_v20()?;
}
if current_version < 21 {
self.migrate_to_v21()?;
}
if current_version < 22 {
self.migrate_to_v22()?;
}
if current_version < 23 {
self.migrate_to_v23()?;
}
if current_version < 24 {
self.migrate_to_v24()?;
}
// Ensure all expected columns exist (repairs DBs where a migration
// was updated after it had already run on this database)
self.ensure_columns()?;
Ok(())
}
/// Add any missing columns that may have been missed by earlier migrations.
/// Add any missing columns that may have been missed by earlier migrations.
/// Each ALTER is best-effort: "duplicate column" failures are expected on
/// databases that already have the column, and are silently ignored.
fn ensure_columns(&self) -> SqlResult<()> {
    let repair_columns = [
        "launch_args TEXT",
        "tags TEXT",
        "pinned INTEGER NOT NULL DEFAULT 0",
        "avg_startup_ms INTEGER",
    ];
    for col_def in repair_columns.iter() {
        let sql = format!("ALTER TABLE appimages ADD COLUMN {}", col_def);
        let _ = self.conn.execute(&sql, []);
    }
    Ok(())
}
/// Schema v2: add Phase 2 status/update columns to `appimages` and create
/// the launch-event, update-history, and duplicate-detection tables.
fn migrate_to_v2(&self) -> SqlResult<()> {
// Add Phase 2 columns to appimages table
let phase2_columns = [
"fuse_status TEXT",
"wayland_status TEXT",
"update_info TEXT",
"update_type TEXT",
"latest_version TEXT",
"update_checked TEXT",
"update_url TEXT",
"notes TEXT",
];
for col in &phase2_columns {
let sql = format!("ALTER TABLE appimages ADD COLUMN {}", col);
// Best-effort: ignore "duplicate column" errors on partially-migrated DBs.
self.conn.execute_batch(&sql).ok();
}
// Phase 2 tables
self.conn.execute_batch(
"CREATE TABLE IF NOT EXISTS launch_events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
launched_at TEXT NOT NULL DEFAULT (datetime('now')),
source TEXT NOT NULL DEFAULT 'desktop_entry'
);
CREATE TABLE IF NOT EXISTS update_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
from_version TEXT,
to_version TEXT,
update_method TEXT,
download_size INTEGER,
updated_at TEXT NOT NULL DEFAULT (datetime('now')),
success INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS duplicate_groups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
canonical_name TEXT NOT NULL,
duplicate_type TEXT,
detected_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE TABLE IF NOT EXISTS duplicate_members (
id INTEGER PRIMARY KEY AUTOINCREMENT,
group_id INTEGER REFERENCES duplicate_groups(id) ON DELETE CASCADE,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
is_recommended INTEGER NOT NULL DEFAULT 0
);"
)?;
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![2],
)?;
Ok(())
}
/// Schema v3: security-scanning tables — bundled libraries, CVE matches,
/// and discovered app-data paths, with lookup indexes.
fn migrate_to_v3(&self) -> SqlResult<()> {
// Phase 3 tables: security scanning
self.conn.execute_batch(
"CREATE TABLE IF NOT EXISTS bundled_libraries (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
soname TEXT NOT NULL,
detected_name TEXT,
detected_version TEXT,
file_path TEXT,
file_size INTEGER DEFAULT 0,
scanned_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE TABLE IF NOT EXISTS cve_matches (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
library_id INTEGER REFERENCES bundled_libraries(id) ON DELETE CASCADE,
cve_id TEXT NOT NULL,
severity TEXT,
cvss_score REAL,
summary TEXT,
affected_versions TEXT,
fixed_version TEXT,
matched_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE TABLE IF NOT EXISTS app_data_paths (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
path TEXT NOT NULL,
path_type TEXT NOT NULL DEFAULT 'other',
discovery_method TEXT NOT NULL DEFAULT 'heuristic',
confidence TEXT NOT NULL DEFAULT 'low',
size_bytes INTEGER DEFAULT 0,
first_seen TEXT NOT NULL DEFAULT (datetime('now')),
last_accessed TEXT
);
CREATE INDEX IF NOT EXISTS idx_bundled_libs_appimage
ON bundled_libraries(appimage_id);
CREATE INDEX IF NOT EXISTS idx_cve_matches_appimage
ON cve_matches(appimage_id);
CREATE INDEX IF NOT EXISTS idx_cve_matches_severity
ON cve_matches(severity);
CREATE INDEX IF NOT EXISTS idx_app_data_paths_appimage
ON app_data_paths(appimage_id);"
)?;
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![3],
)?;
Ok(())
}
/// Schema v4: per-app sandbox mode column on `appimages`.
fn migrate_to_v4(&self) -> SqlResult<()> {
    // Best-effort ALTER: a "duplicate column" failure means the column is
    // already present, which is fine.
    let _ = self.conn.execute(
        "ALTER TABLE appimages ADD COLUMN sandbox_mode TEXT DEFAULT NULL",
        [],
    );
    self.conn.execute(
        "UPDATE schema_version SET version = ?1",
        params![4],
    )?;
    Ok(())
}
/// Schema v5: config-backup, report-export, and CVE-notification tables,
/// plus runtime Wayland probe columns on `appimages`.
fn migrate_to_v5(&self) -> SqlResult<()> {
self.conn.execute_batch(
"CREATE TABLE IF NOT EXISTS config_backups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
app_version TEXT,
archive_path TEXT NOT NULL,
archive_size INTEGER,
checksum TEXT,
created_at TEXT NOT NULL DEFAULT (datetime('now')),
path_count INTEGER,
restored_count INTEGER DEFAULT 0,
last_restored_at TEXT
);
CREATE TABLE IF NOT EXISTS backup_entries (
id INTEGER PRIMARY KEY AUTOINCREMENT,
backup_id INTEGER REFERENCES config_backups(id) ON DELETE CASCADE,
original_path TEXT NOT NULL,
path_type TEXT NOT NULL,
size_bytes INTEGER
);
CREATE TABLE IF NOT EXISTS exported_reports (
id INTEGER PRIMARY KEY AUTOINCREMENT,
scope TEXT NOT NULL,
format TEXT NOT NULL,
file_path TEXT,
generated_at TEXT NOT NULL DEFAULT (datetime('now')),
app_count INTEGER,
cve_count INTEGER
);
CREATE TABLE IF NOT EXISTS cve_notifications (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
cve_id TEXT NOT NULL,
severity TEXT NOT NULL,
notified_at TEXT NOT NULL DEFAULT (datetime('now')),
user_action TEXT,
acted_at TEXT,
UNIQUE(appimage_id, cve_id)
);
CREATE INDEX IF NOT EXISTS idx_config_backups_appimage
ON config_backups(appimage_id);
CREATE INDEX IF NOT EXISTS idx_cve_notifications_appimage
ON cve_notifications(appimage_id);"
)?;
// Best-effort column additions (ignored if a previous run already added
// them; execute_batch stops at the first failing ALTER).
self.conn.execute_batch(
"ALTER TABLE appimages ADD COLUMN runtime_wayland_status TEXT;
ALTER TABLE appimages ADD COLUMN runtime_wayland_checked TEXT;"
).ok();
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![5],
)?;
Ok(())
}
/// Schema v6: catalog sources/apps, sandbox profile storage + history, and
/// runtime-update tracking tables, with their indexes.
fn migrate_to_v6(&self) -> SqlResult<()> {
self.conn.execute_batch(
"CREATE TABLE IF NOT EXISTS catalog_sources (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
url TEXT NOT NULL UNIQUE,
source_type TEXT NOT NULL,
enabled INTEGER DEFAULT 1,
last_synced TEXT,
app_count INTEGER DEFAULT 0
);
CREATE TABLE IF NOT EXISTS catalog_apps (
id INTEGER PRIMARY KEY AUTOINCREMENT,
source_id INTEGER REFERENCES catalog_sources(id) ON DELETE CASCADE,
name TEXT NOT NULL,
description TEXT,
categories TEXT,
latest_version TEXT,
download_url TEXT NOT NULL,
icon_url TEXT,
homepage TEXT,
file_size INTEGER,
architecture TEXT,
cached_at TEXT
);
CREATE TABLE IF NOT EXISTS sandbox_profiles (
id INTEGER PRIMARY KEY AUTOINCREMENT,
app_name TEXT NOT NULL,
profile_version TEXT,
author TEXT,
description TEXT,
content TEXT NOT NULL,
source TEXT NOT NULL,
registry_id TEXT,
created_at TEXT DEFAULT (datetime('now')),
applied_to_appimage_id INTEGER REFERENCES appimages(id)
);
CREATE TABLE IF NOT EXISTS sandbox_profile_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
profile_id INTEGER REFERENCES sandbox_profiles(id) ON DELETE CASCADE,
action TEXT NOT NULL,
timestamp TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE TABLE IF NOT EXISTS runtime_updates (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
old_runtime TEXT,
new_runtime TEXT,
backup_path TEXT,
updated_at TEXT DEFAULT (datetime('now')),
success INTEGER
);
CREATE INDEX IF NOT EXISTS idx_catalog_apps_source
ON catalog_apps(source_id);
CREATE UNIQUE INDEX IF NOT EXISTS idx_catalog_apps_source_name
ON catalog_apps(source_id, name);
CREATE INDEX IF NOT EXISTS idx_sandbox_profiles_app
ON sandbox_profiles(app_name);
CREATE INDEX IF NOT EXISTS idx_runtime_updates_appimage
ON runtime_updates(appimage_id);"
)?;
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![6],
)?;
Ok(())
}
/// Schema v7: async analysis pipeline status and custom launch arguments.
fn migrate_to_v7(&self) -> SqlResult<()> {
    let new_columns = [
        "analysis_status TEXT DEFAULT 'complete'",
        "launch_args TEXT",
    ];
    for col in new_columns.iter() {
        // Best-effort: "duplicate column" errors are expected on re-runs.
        let _ = self
            .conn
            .execute(&format!("ALTER TABLE appimages ADD COLUMN {}", col), []);
    }
    self.conn.execute(
        "UPDATE schema_version SET version = ?1",
        params![7],
    )?;
    Ok(())
}
/// Schema v8: library personalization columns (tags, pinned, startup time),
/// plus a repair pass for `launch_args` on databases whose v7 migration ran
/// before that column was added to the v7 code.
fn migrate_to_v8(&self) -> SqlResult<()> {
    let new_columns = [
        "launch_args TEXT",
        "tags TEXT",
        "pinned INTEGER NOT NULL DEFAULT 0",
        "avg_startup_ms INTEGER",
    ];
    for col in new_columns.iter() {
        // Best-effort: duplicate-column errors are expected and ignored.
        let _ = self
            .conn
            .execute(&format!("ALTER TABLE appimages ADD COLUMN {}", col), []);
    }
    self.conn.execute(
        "UPDATE schema_version SET version = ?1",
        params![8],
    )?;
    Ok(())
}
/// Schema v9: Phase 9 comprehensive AppStream/desktop metadata columns.
/// Unlike earlier migrations, a non-duplicate ALTER failure is fatal here.
fn migrate_to_v9(&self) -> SqlResult<()> {
    let new_columns = [
        "appstream_id TEXT",
        "appstream_description TEXT",
        "generic_name TEXT",
        "license TEXT",
        "homepage_url TEXT",
        "bugtracker_url TEXT",
        "donation_url TEXT",
        "help_url TEXT",
        "vcs_url TEXT",
        "keywords TEXT",
        "mime_types TEXT",
        "content_rating TEXT",
        "project_group TEXT",
        "release_history TEXT",
        "desktop_actions TEXT",
        "has_signature INTEGER NOT NULL DEFAULT 0",
    ];
    for col in new_columns.iter() {
        let sql = format!("ALTER TABLE appimages ADD COLUMN {}", col);
        if let Err(e) = self.conn.execute(&sql, []) {
            // Re-adding an existing column is fine; anything else propagates.
            if !e.to_string().contains("duplicate column") {
                return Err(e);
            }
        }
    }
    self.conn.execute(
        "UPDATE schema_version SET version = ?1",
        params![9],
    )?;
    Ok(())
}
/// Schema v10: screenshot URL column plus a one-time reset of
/// `analysis_status` so the new AppStream parser re-runs on existing apps.
fn migrate_to_v10(&self) -> SqlResult<()> {
    if let Err(e) = self
        .conn
        .execute("ALTER TABLE appimages ADD COLUMN screenshot_urls TEXT", [])
    {
        // A pre-existing column is fine; any other failure propagates.
        if !e.to_string().contains("duplicate column") {
            return Err(e);
        }
    }
    // Force one-time re-analysis so the new AppStream parser (screenshots,
    // extended metadata) runs on existing apps
    self.conn.execute(
        "UPDATE appimages SET analysis_status = NULL WHERE analysis_status = 'complete'",
        [],
    )?;
    self.conn.execute(
        "UPDATE schema_version SET version = ?1",
        params![10],
    )?;
    Ok(())
}
/// Schema v11: system-modification audit table plus Phase 11 tracking
/// columns (versioning, autostart, verification, portability) on `appimages`.
fn migrate_to_v11(&self) -> SqlResult<()> {
self.conn.execute_batch(
"CREATE TABLE IF NOT EXISTS system_modifications (
id INTEGER PRIMARY KEY AUTOINCREMENT,
appimage_id INTEGER REFERENCES appimages(id) ON DELETE CASCADE,
mod_type TEXT NOT NULL,
file_path TEXT NOT NULL,
previous_value TEXT,
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE INDEX IF NOT EXISTS idx_system_mods_appimage
ON system_modifications(appimage_id);"
)?;
let new_columns = [
"previous_version_path TEXT",
"source_url TEXT",
"autostart INTEGER NOT NULL DEFAULT 0",
"startup_wm_class TEXT",
"verification_status TEXT",
"first_run_prompted INTEGER NOT NULL DEFAULT 0",
"system_wide INTEGER NOT NULL DEFAULT 0",
"is_portable INTEGER NOT NULL DEFAULT 0",
"mount_point TEXT",
];
for col in &new_columns {
let sql = format!("ALTER TABLE appimages ADD COLUMN {}", col);
// Best-effort: ignore "duplicate column" errors on re-runs.
self.conn.execute(&sql, []).ok();
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![11],
)?;
Ok(())
}
/// Schema v12: screenshots and license columns on `catalog_apps`.
fn migrate_to_v12(&self) -> SqlResult<()> {
    for col in ["screenshots TEXT", "license TEXT"].iter() {
        // Best-effort: duplicate-column errors are expected and ignored.
        let _ = self
            .conn
            .execute(&format!("ALTER TABLE catalog_apps ADD COLUMN {}", col), []);
    }
    self.conn.execute(
        "UPDATE schema_version SET version = ?1",
        params![12],
    )?;
    Ok(())
}
/// Schema v13: de-duplicate `catalog_apps` per (source_id, name) and add a
/// unique index to keep it that way. The version bump is part of the batch
/// so the whole step runs as one script.
fn migrate_to_v13(&self) -> SqlResult<()> {
// Remove duplicate catalog_apps entries, keeping the row with the highest id
// (most recent insert) per (source_id, name) pair
self.conn.execute_batch(
"DELETE FROM catalog_apps WHERE id NOT IN (
SELECT MAX(id) FROM catalog_apps GROUP BY source_id, name
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_catalog_apps_source_name
ON catalog_apps(source_id, name);
UPDATE schema_version SET version = 13;"
)?;
Ok(())
}
/// Schema v14: GitHub enrichment columns (owner/repo, stars, downloads,
/// release date) on `catalog_apps`.
fn migrate_to_v14(&self) -> SqlResult<()> {
let new_columns = [
"github_owner TEXT",
"github_repo TEXT",
"github_stars INTEGER",
"github_downloads INTEGER",
"release_date TEXT",
"github_enriched_at TEXT",
];
for col in &new_columns {
let sql = format!("ALTER TABLE catalog_apps ADD COLUMN {}", col);
// Best-effort: ignore "duplicate column" errors on re-runs.
self.conn.execute(&sql, []).ok();
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![14],
)?;
Ok(())
}
/// Schema v15: GitHub download URL and release-asset columns on
/// `catalog_apps`.
fn migrate_to_v15(&self) -> SqlResult<()> {
let new_columns = [
"github_download_url TEXT",
"github_release_assets TEXT",
];
for col in &new_columns {
let sql = format!("ALTER TABLE catalog_apps ADD COLUMN {}", col);
// Best-effort: ignore "duplicate column" errors on re-runs.
self.conn.execute(&sql, []).ok();
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![15],
)?;
Ok(())
}
/// Schema v16: GitHub description and README columns on `catalog_apps`.
fn migrate_to_v16(&self) -> SqlResult<()> {
let new_columns = [
"github_description TEXT",
"github_readme TEXT",
];
for col in &new_columns {
let sql = format!("ALTER TABLE catalog_apps ADD COLUMN {}", col);
// Best-effort: ignore "duplicate column" errors on re-runs.
self.conn.execute(&sql, []).ok();
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![16],
)?;
Ok(())
}
/// Schema v17: first batch of OCS (appimagehub.com) metadata columns on
/// `catalog_apps`.
fn migrate_to_v17(&self) -> SqlResult<()> {
let new_columns = [
"ocs_id INTEGER",
"ocs_downloads INTEGER",
"ocs_score INTEGER",
"ocs_typename TEXT",
"ocs_personid TEXT",
"ocs_description TEXT",
"ocs_summary TEXT",
"ocs_version TEXT",
"ocs_tags TEXT",
"ocs_changed TEXT",
"ocs_preview_url TEXT",
];
for col in &new_columns {
let sql = format!("ALTER TABLE catalog_apps ADD COLUMN {}", col);
// Best-effort: ignore "duplicate column" errors on re-runs.
self.conn.execute(&sql, []).ok();
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![17],
)?;
Ok(())
}
/// Schema v18: second batch of OCS metadata columns (detail page, download
/// info, checksum, comment count) on `catalog_apps`.
fn migrate_to_v18(&self) -> SqlResult<()> {
let new_columns = [
"ocs_detailpage TEXT",
"ocs_created TEXT",
"ocs_downloadname TEXT",
"ocs_downloadsize INTEGER",
"ocs_arch TEXT",
"ocs_md5sum TEXT",
"ocs_comments INTEGER",
];
for col in &new_columns {
let sql = format!("ALTER TABLE catalog_apps ADD COLUMN {}", col);
// Best-effort: ignore "duplicate column" errors on re-runs.
self.conn.execute(&sql, []).ok();
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![18],
)?;
Ok(())
}
/// Schema v19: release-history column on `catalog_apps`.
fn migrate_to_v19(&self) -> SqlResult<()> {
    // Best-effort: a duplicate-column error means the column already exists.
    let _ = self.conn.execute(
        "ALTER TABLE catalog_apps ADD COLUMN release_history TEXT",
        [],
    );
    self.conn.execute(
        "UPDATE schema_version SET version = ?1",
        params![19],
    )?;
    Ok(())
}
/// Re-categorize OCS apps using the exhaustive typename mapping.
fn migrate_to_v20(&self) -> SqlResult<()> {
let mut stmt = self.conn.prepare(
"SELECT id, ocs_typename FROM catalog_apps WHERE ocs_typename IS NOT NULL",
)?;
// Snapshot all rows up front, then issue the UPDATEs in a second pass.
let rows: Vec<(i64, String)> = stmt.query_map([], |row| {
Ok((row.get(0)?, row.get(1)?))
})?.collect::<SqlResult<Vec<_>>>()?;
for (id, typename) in &rows {
let cats = catalog::map_ocs_category(typename);
// Categories are persisted as a ';'-separated list.
let cats_str = cats.join(";");
self.conn.execute(
"UPDATE catalog_apps SET categories = ?1 WHERE id = ?2",
params![cats_str, id],
)?;
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![20],
)?;
Ok(())
}
/// Normalize all catalog app categories (OCS and non-OCS) to clean groups.
fn migrate_to_v21(&self) -> SqlResult<()> {
let mut stmt = self.conn.prepare(
"SELECT id, categories, ocs_typename FROM catalog_apps WHERE categories IS NOT NULL AND categories <> ''",
)?;
// Snapshot all rows up front, then issue the UPDATEs in a second pass.
let rows: Vec<(i64, String, Option<String>)> = stmt.query_map([], |row| {
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
})?.collect::<SqlResult<Vec<_>>>()?;
for (id, cats_str, ocs_typename) in &rows {
let cats: Vec<String> = cats_str.split(';').filter(|s| !s.is_empty()).map(|s| s.to_string()).collect();
let normalized = if ocs_typename.is_some() {
// OCS apps: already mapped by v20, but re-normalize for consistency
// NOTE(review): this arm returns `cats` unchanged, so OCS rows are
// effectively untouched here; v22/v23 redo this normalization.
cats
} else {
// Non-OCS apps: normalize FreeDesktop categories
catalog::normalize_categories(cats)
};
let new_str = normalized.join(";");
// Only write rows whose category string actually changed.
if new_str != *cats_str {
self.conn.execute(
"UPDATE catalog_apps SET categories = ?1 WHERE id = ?2",
params![new_str, id],
)?;
}
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![21],
)?;
Ok(())
}
/// Re-normalize all non-OCS categories to clean groups (fixes v21 fallback bug).
fn migrate_to_v22(&self) -> SqlResult<()> {
let mut stmt = self.conn.prepare(
"SELECT id, categories FROM catalog_apps WHERE categories IS NOT NULL AND categories <> '' AND ocs_typename IS NULL",
)?;
// Snapshot all rows up front, then issue the UPDATEs in a second pass.
let rows: Vec<(i64, String)> = stmt.query_map([], |row| {
Ok((row.get(0)?, row.get(1)?))
})?.collect::<SqlResult<Vec<_>>>()?;
for (id, cats_str) in &rows {
// Split the ';'-separated list, normalize, and re-join.
let cats: Vec<String> = cats_str.split(';').filter(|s| !s.is_empty()).map(|s| s.to_string()).collect();
let normalized = catalog::normalize_categories(cats);
let new_str = normalized.join(";");
// Only write rows whose category string actually changed.
if new_str != *cats_str {
self.conn.execute(
"UPDATE catalog_apps SET categories = ?1 WHERE id = ?2",
params![new_str, id],
)?;
}
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![22],
)?;
Ok(())
}
/// Re-categorize all apps with expanded category set (Communication, Multimedia, Photography, Productivity).
fn migrate_to_v23(&self) -> SqlResult<()> {
// Re-map OCS apps
{
let mut stmt = self.conn.prepare(
"SELECT id, ocs_typename FROM catalog_apps WHERE ocs_typename IS NOT NULL",
)?;
// Snapshot the rows, then update in a second pass.
let rows: Vec<(i64, String)> = stmt.query_map([], |row| {
Ok((row.get(0)?, row.get(1)?))
})?.collect::<SqlResult<Vec<_>>>()?;
for (id, typename) in &rows {
let cats = catalog::map_ocs_category(typename);
let cats_str = cats.join(";");
self.conn.execute(
"UPDATE catalog_apps SET categories = ?1 WHERE id = ?2",
params![cats_str, id],
)?;
}
}
// Re-normalize non-OCS apps
{
let mut stmt = self.conn.prepare(
"SELECT id, categories FROM catalog_apps WHERE categories IS NOT NULL AND categories <> '' AND ocs_typename IS NULL",
)?;
let rows: Vec<(i64, String)> = stmt.query_map([], |row| {
Ok((row.get(0)?, row.get(1)?))
})?.collect::<SqlResult<Vec<_>>>()?;
for (id, cats_str) in &rows {
let cats: Vec<String> = cats_str.split(';').filter(|s| !s.is_empty()).map(|s| s.to_string()).collect();
let normalized = catalog::normalize_categories(cats);
let new_str = normalized.join(";");
// Only write rows whose category string actually changed.
if new_str != *cats_str {
self.conn.execute(
"UPDATE catalog_apps SET categories = ?1 WHERE id = ?2",
params![new_str, id],
)?;
}
}
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![23],
)?;
Ok(())
}
/// Consolidate to 11 categories (merge thin ones into larger groups).
/// Same two-pass shape as v23; the behavioral change lives in the updated
/// mapping/normalization functions in the `catalog` module.
fn migrate_to_v24(&self) -> SqlResult<()> {
// Re-map OCS apps
{
let mut stmt = self.conn.prepare(
"SELECT id, ocs_typename FROM catalog_apps WHERE ocs_typename IS NOT NULL",
)?;
// Snapshot the rows, then update in a second pass.
let rows: Vec<(i64, String)> = stmt.query_map([], |row| {
Ok((row.get(0)?, row.get(1)?))
})?.collect::<SqlResult<Vec<_>>>()?;
for (id, typename) in &rows {
let cats = catalog::map_ocs_category(typename);
let cats_str = cats.join(";");
self.conn.execute(
"UPDATE catalog_apps SET categories = ?1 WHERE id = ?2",
params![cats_str, id],
)?;
}
}
// Re-normalize non-OCS apps
{
let mut stmt = self.conn.prepare(
"SELECT id, categories FROM catalog_apps WHERE categories IS NOT NULL AND categories <> '' AND ocs_typename IS NULL",
)?;
let rows: Vec<(i64, String)> = stmt.query_map([], |row| {
Ok((row.get(0)?, row.get(1)?))
})?.collect::<SqlResult<Vec<_>>>()?;
for (id, cats_str) in &rows {
let cats: Vec<String> = cats_str.split(';').filter(|s| !s.is_empty()).map(|s| s.to_string()).collect();
let normalized = catalog::normalize_categories(cats);
let new_str = normalized.join(";");
// Only write rows whose category string actually changed.
if new_str != *cats_str {
self.conn.execute(
"UPDATE catalog_apps SET categories = ?1 WHERE id = ?2",
params![new_str, id],
)?;
}
}
}
self.conn.execute(
"UPDATE schema_version SET version = ?1",
params![24],
)?;
Ok(())
}
/// Insert a newly scanned AppImage, or refresh the file-level fields of an
/// existing row (matched on the UNIQUE `path`), returning the row id.
///
/// On conflict only the scan-derived columns and `last_scanned` are
/// refreshed; analysis-derived metadata (app_name, sha256, etc.) is left
/// untouched so a rescan never clobbers it.
pub fn upsert_appimage(
&self,
path: &str,
filename: &str,
appimage_type: Option<i32>,
size_bytes: i64,
is_executable: bool,
file_modified: Option<&str>,
) -> SqlResult<i64> {
// RETURNING id yields the row id for both the insert and update branches.
let id: i64 = self.conn.query_row(
"INSERT INTO appimages (path, filename, appimage_type, size_bytes, is_executable, file_modified)
VALUES (?1, ?2, ?3, ?4, ?5, ?6)
ON CONFLICT(path) DO UPDATE SET
filename = excluded.filename,
appimage_type = excluded.appimage_type,
size_bytes = excluded.size_bytes,
is_executable = excluded.is_executable,
file_modified = excluded.file_modified,
last_scanned = datetime('now')
RETURNING id",
params![path, filename, appimage_type, size_bytes, is_executable, file_modified],
|row| row.get(0),
)?;
Ok(id)
}
/// Overwrite the extracted desktop-entry metadata for one AppImage row.
/// All fields are written unconditionally; passing `None` clears a column.
pub fn update_metadata(
    &self,
    id: i64,
    app_name: Option<&str>,
    app_version: Option<&str>,
    description: Option<&str>,
    developer: Option<&str>,
    categories: Option<&str>,
    architecture: Option<&str>,
    icon_path: Option<&str>,
    desktop_entry_content: Option<&str>,
) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET
                app_name = ?2,
                app_version = ?3,
                description = ?4,
                developer = ?5,
                categories = ?6,
                architecture = ?7,
                icon_path = ?8,
                desktop_entry_content = ?9
             WHERE id = ?1",
            params![
                id, app_name, app_version, description, developer,
                categories, architecture, icon_path, desktop_entry_content,
            ],
        )
        .map(|_| ())
}
/// Overwrite all AppStream-derived metadata columns for one AppImage row.
///
/// Every column is written unconditionally, so passing `None` clears the
/// corresponding field. The positional `params![]` order must stay in
/// lock-step with the `?2`..`?18` placeholders in the SQL below.
pub fn update_appstream_metadata(
    &self,
    id: i64,
    appstream_id: Option<&str>,
    appstream_description: Option<&str>,
    generic_name: Option<&str>,
    license: Option<&str>,
    homepage_url: Option<&str>,
    bugtracker_url: Option<&str>,
    donation_url: Option<&str>,
    help_url: Option<&str>,
    vcs_url: Option<&str>,
    keywords: Option<&str>,
    mime_types: Option<&str>,
    content_rating: Option<&str>,
    project_group: Option<&str>,
    release_history: Option<&str>,
    desktop_actions: Option<&str>,
    has_signature: bool,
    screenshot_urls: Option<&str>,
) -> SqlResult<()> {
    self.conn.execute(
        "UPDATE appimages SET
            appstream_id = ?2,
            appstream_description = ?3,
            generic_name = ?4,
            license = ?5,
            homepage_url = ?6,
            bugtracker_url = ?7,
            donation_url = ?8,
            help_url = ?9,
            vcs_url = ?10,
            keywords = ?11,
            mime_types = ?12,
            content_rating = ?13,
            project_group = ?14,
            release_history = ?15,
            desktop_actions = ?16,
            has_signature = ?17,
            screenshot_urls = ?18
         WHERE id = ?1",
        params![
            id,
            appstream_id,
            appstream_description,
            generic_name,
            license,
            homepage_url,
            bugtracker_url,
            donation_url,
            help_url,
            vcs_url,
            keywords,
            mime_types,
            content_rating,
            project_group,
            release_history,
            desktop_actions,
            has_signature,
            screenshot_urls,
        ],
    )?;
    Ok(())
}
/// Store the computed SHA-256 digest for an AppImage row.
pub fn update_sha256(&self, id: i64, sha256: &str) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET sha256 = ?2 WHERE id = ?1",
            params![id, sha256],
        )
        .map(|_| ())
}
/// Mark an AppImage as integrated (or not) into the desktop environment.
///
/// When `integrated` is true the `integrated_at` timestamp is set to the
/// current UTC time; when false it is cleared, as is appropriate for an
/// un-integration.
pub fn set_integrated(
    &self,
    id: i64,
    integrated: bool,
    desktop_file: Option<&str>,
) -> SqlResult<()> {
    // Timestamp only exists while the app is integrated.
    let integrated_at = integrated
        .then(|| chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string());
    self.conn
        .execute(
            "UPDATE appimages SET integrated = ?2, desktop_file = ?3, integrated_at = ?4 WHERE id = ?1",
            params![id, integrated, desktop_file, integrated_at],
        )
        .map(|_| ())
}
/// Canonical column list for SELECTs against `appimages`.
///
/// The positional order here is a hard contract with `row_to_record`
/// (index 0 = id … index 62 = mount_point) and with the 63-placeholder
/// insert in `restore_appimage_record`. Append new columns at the end;
/// never reorder.
const APPIMAGE_COLUMNS: &str =
    "id, path, filename, app_name, app_version, appimage_type,
    size_bytes, sha256, icon_path, desktop_file, integrated,
    integrated_at, is_executable, desktop_entry_content,
    categories, description, developer, architecture,
    first_seen, last_scanned, file_modified,
    fuse_status, wayland_status, update_info, update_type,
    latest_version, update_checked, update_url, notes, sandbox_mode,
    runtime_wayland_status, runtime_wayland_checked, analysis_status,
    launch_args, tags, pinned, avg_startup_ms,
    appstream_id, appstream_description, generic_name, license,
    homepage_url, bugtracker_url, donation_url, help_url, vcs_url,
    keywords, mime_types, content_rating, project_group,
    release_history, desktop_actions, has_signature, screenshot_urls,
    previous_version_path, source_url, autostart, startup_wm_class,
    verification_status, first_run_prompted, system_wide, is_portable, mount_point";
/// Canonical column list for SELECTs against `catalog_apps`.
///
/// Positional order is a hard contract with `catalog_app_from_row`
/// (index 0 = id … index 38 = release_history). Append new columns at
/// the end; never reorder.
const CATALOG_APP_COLUMNS: &str =
    "id, name, description, categories, download_url, icon_url, homepage, license, screenshots,
    github_owner, github_repo, github_stars, github_downloads, latest_version, release_date,
    github_enriched_at, github_download_url, github_release_assets, github_description, github_readme,
    ocs_id, ocs_downloads, ocs_score, ocs_typename, ocs_personid, ocs_description, ocs_summary,
    ocs_version, ocs_tags, ocs_changed, ocs_preview_url,
    ocs_detailpage, ocs_created, ocs_downloadname, ocs_downloadsize, ocs_arch, ocs_md5sum, ocs_comments,
    release_history";
/// SQL filter that deduplicates catalog apps by lowercase name.
/// Keeps the OCS entry when both OCS and secondary source entries exist for the same name.
/// Also handles within-source case duplicates (e.g. "Sabaki" vs "sabaki").
///
/// Implemented with a window function: rows are partitioned by
/// LOWER(name), OCS rows sort first, and only the top row (rn = 1) of
/// each partition survives. Intended to be appended after an existing
/// WHERE clause (note the leading "AND").
const CATALOG_DEDUP_FILTER: &str =
    "AND id IN (
        SELECT id FROM (
            SELECT id, ROW_NUMBER() OVER (
                PARTITION BY LOWER(name)
                ORDER BY CASE WHEN ocs_id IS NOT NULL THEN 0 ELSE 1 END, id DESC
            ) AS rn
            FROM catalog_apps
        ) WHERE rn = 1
    )";
/// Map one result row (selected via `CATALOG_APP_COLUMNS`, same order)
/// into a `CatalogApp`.
///
/// Indices 0..=19 are read strictly; the OCS columns (20..) use
/// `.unwrap_or(None)` so a row that fails to decode one of those
/// optional columns degrades to `None` instead of failing the whole
/// query. NOTE(review): this also masks genuine decode errors on those
/// columns — presumably tolerated for rows predating the OCS schema
/// additions; confirm before tightening.
fn catalog_app_from_row(row: &rusqlite::Row) -> rusqlite::Result<CatalogApp> {
    Ok(CatalogApp {
        id: row.get(0)?,
        name: row.get(1)?,
        description: row.get(2)?,
        categories: row.get(3)?,
        download_url: row.get(4)?,
        icon_url: row.get(5)?,
        homepage: row.get(6)?,
        license: row.get(7)?,
        screenshots: row.get(8)?,
        github_owner: row.get(9)?,
        github_repo: row.get(10)?,
        github_stars: row.get(11)?,
        github_downloads: row.get(12)?,
        latest_version: row.get(13)?,
        release_date: row.get(14)?,
        github_enriched_at: row.get(15)?,
        github_download_url: row.get(16)?,
        github_release_assets: row.get(17)?,
        github_description: row.get(18)?,
        github_readme: row.get(19)?,
        ocs_id: row.get(20).unwrap_or(None),
        ocs_downloads: row.get(21).unwrap_or(None),
        ocs_score: row.get(22).unwrap_or(None),
        ocs_typename: row.get(23).unwrap_or(None),
        ocs_personid: row.get(24).unwrap_or(None),
        ocs_description: row.get(25).unwrap_or(None),
        ocs_summary: row.get(26).unwrap_or(None),
        ocs_version: row.get(27).unwrap_or(None),
        ocs_tags: row.get(28).unwrap_or(None),
        ocs_changed: row.get(29).unwrap_or(None),
        ocs_preview_url: row.get(30).unwrap_or(None),
        ocs_detailpage: row.get(31).unwrap_or(None),
        ocs_created: row.get(32).unwrap_or(None),
        ocs_downloadname: row.get(33).unwrap_or(None),
        ocs_downloadsize: row.get(34).unwrap_or(None),
        ocs_arch: row.get(35).unwrap_or(None),
        ocs_md5sum: row.get(36).unwrap_or(None),
        ocs_comments: row.get(37).unwrap_or(None),
        release_history: row.get(38).unwrap_or(None),
    })
}
/// Map one result row (selected via `APPIMAGE_COLUMNS`, same order)
/// into an `AppImageRecord`.
///
/// Indices 0..=29 are read strictly; columns from index 30 onward use
/// `.unwrap_or(...)` fallbacks so decode failures on those columns
/// degrade to `None`/`false` instead of failing the row. NOTE(review):
/// presumably this tolerates rows written before later schema
/// migrations — confirm, since it also hides real decode errors.
fn row_to_record(row: &rusqlite::Row) -> rusqlite::Result<AppImageRecord> {
    Ok(AppImageRecord {
        id: row.get(0)?,
        path: row.get(1)?,
        filename: row.get(2)?,
        app_name: row.get(3)?,
        app_version: row.get(4)?,
        appimage_type: row.get(5)?,
        size_bytes: row.get(6)?,
        sha256: row.get(7)?,
        icon_path: row.get(8)?,
        desktop_file: row.get(9)?,
        integrated: row.get(10)?,
        integrated_at: row.get(11)?,
        is_executable: row.get(12)?,
        desktop_entry_content: row.get(13)?,
        categories: row.get(14)?,
        description: row.get(15)?,
        developer: row.get(16)?,
        architecture: row.get(17)?,
        first_seen: row.get(18)?,
        last_scanned: row.get(19)?,
        file_modified: row.get(20)?,
        fuse_status: row.get(21)?,
        wayland_status: row.get(22)?,
        update_info: row.get(23)?,
        update_type: row.get(24)?,
        latest_version: row.get(25)?,
        update_checked: row.get(26)?,
        update_url: row.get(27)?,
        notes: row.get(28)?,
        sandbox_mode: row.get(29)?,
        runtime_wayland_status: row.get(30).unwrap_or(None),
        runtime_wayland_checked: row.get(31).unwrap_or(None),
        analysis_status: row.get(32).unwrap_or(None),
        launch_args: row.get(33).unwrap_or(None),
        tags: row.get(34).unwrap_or(None),
        pinned: row.get::<_, bool>(35).unwrap_or(false),
        avg_startup_ms: row.get(36).unwrap_or(None),
        appstream_id: row.get(37).unwrap_or(None),
        appstream_description: row.get(38).unwrap_or(None),
        generic_name: row.get(39).unwrap_or(None),
        license: row.get(40).unwrap_or(None),
        homepage_url: row.get(41).unwrap_or(None),
        bugtracker_url: row.get(42).unwrap_or(None),
        donation_url: row.get(43).unwrap_or(None),
        help_url: row.get(44).unwrap_or(None),
        vcs_url: row.get(45).unwrap_or(None),
        keywords: row.get(46).unwrap_or(None),
        mime_types: row.get(47).unwrap_or(None),
        content_rating: row.get(48).unwrap_or(None),
        project_group: row.get(49).unwrap_or(None),
        release_history: row.get(50).unwrap_or(None),
        desktop_actions: row.get(51).unwrap_or(None),
        has_signature: row.get::<_, bool>(52).unwrap_or(false),
        screenshot_urls: row.get(53).unwrap_or(None),
        previous_version_path: row.get(54).unwrap_or(None),
        source_url: row.get(55).unwrap_or(None),
        autostart: row.get::<_, bool>(56).unwrap_or(false),
        startup_wm_class: row.get(57).unwrap_or(None),
        verification_status: row.get(58).unwrap_or(None),
        first_run_prompted: row.get::<_, bool>(59).unwrap_or(false),
        system_wide: row.get::<_, bool>(60).unwrap_or(false),
        is_portable: row.get::<_, bool>(61).unwrap_or(false),
        mount_point: row.get(62).unwrap_or(None),
    })
}
/// Fetch every tracked AppImage, sorted case-insensitively by app name
/// and then by filename.
pub fn get_all_appimages(&self) -> SqlResult<Vec<AppImageRecord>> {
    let query = format!(
        "SELECT {} FROM appimages ORDER BY app_name COLLATE NOCASE, filename",
        Self::APPIMAGE_COLUMNS
    );
    self.conn
        .prepare(&query)?
        .query_map([], Self::row_to_record)?
        .collect()
}
/// Look up a single AppImage by primary key; `None` if no such row.
pub fn get_appimage_by_id(&self, id: i64) -> SqlResult<Option<AppImageRecord>> {
    let query = format!(
        "SELECT {} FROM appimages WHERE id = ?1",
        Self::APPIMAGE_COLUMNS
    );
    let mut stmt = self.conn.prepare(&query)?;
    let mut mapped = stmt.query_map(params![id], Self::row_to_record)?;
    match mapped.next() {
        Some(record) => record.map(Some),
        None => Ok(None),
    }
}
/// Look up a single AppImage by its filesystem path; `None` if untracked.
pub fn get_appimage_by_path(&self, path: &str) -> SqlResult<Option<AppImageRecord>> {
    let query = format!(
        "SELECT {} FROM appimages WHERE path = ?1",
        Self::APPIMAGE_COLUMNS
    );
    let mut stmt = self.conn.prepare(&query)?;
    let mut mapped = stmt.query_map(params![path], Self::row_to_record)?;
    match mapped.next() {
        Some(record) => record.map(Some),
        None => Ok(None),
    }
}
/// Delete the AppImage row with the given id (no-op if absent).
pub fn remove_appimage(&self, id: i64) -> SqlResult<()> {
    self.conn
        .execute("DELETE FROM appimages WHERE id = ?1", params![id])
        .map(|_| ())
}
/// Re-insert a previously deleted AppImageRecord with its original ID.
/// Used for undo-uninstall support.
///
/// The column list comes from `APPIMAGE_COLUMNS` and the 63 positional
/// placeholders are generated to match; the `params![]` list below must
/// stay in exactly that column order. `INSERT OR REPLACE` means an
/// existing row with the same id (or path) is overwritten.
pub fn restore_appimage_record(&self, r: &AppImageRecord) -> SqlResult<()> {
    self.conn.execute(
        &format!(
            "INSERT OR REPLACE INTO appimages ({}) VALUES ({})",
            Self::APPIMAGE_COLUMNS,
            // Build "?1, ?2, …, ?63" — one placeholder per column.
            (1..=63).map(|i| format!("?{}", i)).collect::<Vec<_>>().join(", ")
        ),
        params![
            r.id, r.path, r.filename, r.app_name, r.app_version, r.appimage_type,
            r.size_bytes, r.sha256, r.icon_path, r.desktop_file, r.integrated,
            r.integrated_at, r.is_executable, r.desktop_entry_content,
            r.categories, r.description, r.developer, r.architecture,
            r.first_seen, r.last_scanned, r.file_modified,
            r.fuse_status, r.wayland_status, r.update_info, r.update_type,
            r.latest_version, r.update_checked, r.update_url, r.notes, r.sandbox_mode,
            r.runtime_wayland_status, r.runtime_wayland_checked, r.analysis_status,
            r.launch_args, r.tags, r.pinned, r.avg_startup_ms,
            r.appstream_id, r.appstream_description, r.generic_name, r.license,
            r.homepage_url, r.bugtracker_url, r.donation_url, r.help_url, r.vcs_url,
            r.keywords, r.mime_types, r.content_rating, r.project_group,
            r.release_history, r.desktop_actions, r.has_signature, r.screenshot_urls,
            r.previous_version_path, r.source_url, r.autostart, r.startup_wm_class,
            r.verification_status, r.first_run_prompted, r.system_wide, r.is_portable,
            r.mount_point,
        ],
    )?;
    Ok(())
}
/// Drop database rows whose backing file no longer exists on disk.
/// Returns the records that were removed (for orphan reporting / undo).
pub fn remove_missing_appimages(&self) -> SqlResult<Vec<AppImageRecord>> {
    let mut gone = Vec::new();
    for record in self.get_all_appimages()? {
        let still_on_disk = std::path::Path::new(&record.path).exists();
        if still_on_disk {
            continue;
        }
        self.remove_appimage(record.id)?;
        gone.push(record);
    }
    Ok(gone)
}
/// Record a desktop file whose backing AppImage has disappeared.
pub fn add_orphaned_entry(
    &self,
    desktop_file: &str,
    original_path: Option<&str>,
    app_name: Option<&str>,
) -> SqlResult<()> {
    self.conn
        .execute(
            "INSERT INTO orphaned_entries (desktop_file, original_path, app_name) VALUES (?1, ?2, ?3)",
            params![desktop_file, original_path, app_name],
        )
        .map(|_| ())
}
/// List orphaned desktop entries that have not yet been cleaned up.
pub fn get_orphaned_entries(&self) -> SqlResult<Vec<OrphanedEntry>> {
    self.conn
        .prepare(
            "SELECT id, desktop_file, original_path, app_name, detected_at, cleaned
             FROM orphaned_entries WHERE cleaned = 0",
        )?
        .query_map([], |row| {
            Ok(OrphanedEntry {
                id: row.get(0)?,
                desktop_file: row.get(1)?,
                original_path: row.get(2)?,
                app_name: row.get(3)?,
                detected_at: row.get(4)?,
                cleaned: row.get(5)?,
            })
        })?
        .collect()
}
/// Flag an orphaned desktop entry as cleaned so it stops being listed.
pub fn mark_orphan_cleaned(&self, id: i64) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE orphaned_entries SET cleaned = 1 WHERE id = ?1",
            params![id],
        )
        .map(|_| ())
}
/// Append one entry to the scan log. Scanned directories are stored as
/// a single semicolon-joined string.
pub fn log_scan(
    &self,
    scan_type: &str,
    directories: &[String],
    found: i32,
    new_count: i32,
    removed: i32,
    duration_ms: i64,
) -> SqlResult<()> {
    let dir_list = directories.join(";");
    self.conn
        .execute(
            "INSERT INTO scan_log (scan_type, directories, found, new_count, removed, duration_ms)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
            params![scan_type, dir_list, found, new_count, removed, duration_ms],
        )
        .map(|_| ())
}
/// Total number of tracked AppImage rows.
pub fn appimage_count(&self) -> SqlResult<i64> {
    self.conn
        .query_row("SELECT COUNT(*) FROM appimages", [], |row| row.get(0))
}
// --- Phase 2: Status updates ---
/// Store the FUSE compatibility status string for an AppImage.
pub fn update_fuse_status(&self, id: i64, status: &str) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET fuse_status = ?2 WHERE id = ?1",
            params![id, status],
        )
        .map(|_| ())
}
/// Store the (static) Wayland compatibility status string for an AppImage.
pub fn update_wayland_status(&self, id: i64, status: &str) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET wayland_status = ?2 WHERE id = ?1",
            params![id, status],
        )
        .map(|_| ())
}
/// Set or clear (`None`) the user's free-form notes for an AppImage.
pub fn update_notes(&self, id: i64, notes: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET notes = ?2 WHERE id = ?1",
            params![id, notes],
        )
        .map(|_| ())
}
/// Set or clear (`None`) the configured sandbox mode for an AppImage.
pub fn update_sandbox_mode(&self, id: i64, mode: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET sandbox_mode = ?2 WHERE id = ?1",
            params![id, mode],
        )
        .map(|_| ())
}
/// Set or clear (`None`) the custom launch arguments for an AppImage.
pub fn update_launch_args(&self, id: i64, args: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET launch_args = ?2 WHERE id = ?1",
            params![id, args],
        )
        .map(|_| ())
}
/// Store the embedded update-information string and its detected type.
pub fn update_update_info(
    &self,
    id: i64,
    update_info: Option<&str>,
    update_type: Option<&str>,
) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET update_info = ?2, update_type = ?3 WHERE id = ?1",
            params![id, update_info, update_type],
        )
        .map(|_| ())
}
/// Record that a newer version is available and stamp `update_checked`
/// with the current time.
pub fn set_update_available(
    &self,
    id: i64,
    latest_version: Option<&str>,
    update_url: Option<&str>,
) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET latest_version = ?2, update_url = ?3,
             update_checked = datetime('now') WHERE id = ?1",
            params![id, latest_version, update_url],
        )
        .map(|_| ())
}
/// Clear any pending-update markers while still stamping `update_checked`.
pub fn clear_update_available(&self, id: i64) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET latest_version = NULL, update_url = NULL,
             update_checked = datetime('now') WHERE id = ?1",
            params![id],
        )
        .map(|_| ())
}
pub fn updatable_count(&self) -> i64 {
self.conn
.query_row(
"SELECT COUNT(*) FROM appimages WHERE latest_version IS NOT NULL",
[],
|row| row.get(0),
)
.unwrap_or(0)
}
/// Fetch all AppImages that currently have an update available,
/// sorted case-insensitively by name then filename.
pub fn get_appimages_with_updates(&self) -> SqlResult<Vec<AppImageRecord>> {
    let query = format!(
        "SELECT {} FROM appimages WHERE latest_version IS NOT NULL
         ORDER BY app_name COLLATE NOCASE, filename",
        Self::APPIMAGE_COLUMNS
    );
    self.conn
        .prepare(&query)?
        .query_map([], Self::row_to_record)?
        .collect()
}
// --- Phase 2: Launch tracking ---
/// Append one launch event for an AppImage; `source` describes what
/// triggered the launch (string value supplied by the caller).
pub fn record_launch(&self, appimage_id: i64, source: &str) -> SqlResult<()> {
    self.conn
        .execute(
            "INSERT INTO launch_events (appimage_id, source) VALUES (?1, ?2)",
            params![appimage_id, source],
        )
        .map(|_| ())
}
/// Total number of recorded launches for one AppImage.
pub fn get_launch_count(&self, appimage_id: i64) -> SqlResult<i64> {
    self.conn.query_row(
        "SELECT COUNT(*) FROM launch_events WHERE appimage_id = ?1",
        params![appimage_id],
        |row| row.get(0),
    )
}
/// Timestamp of the most recent launch of one AppImage, or `None` if it
/// has never been launched (MAX over an empty set is NULL).
pub fn get_last_launched(&self, appimage_id: i64) -> SqlResult<Option<String>> {
    self.conn.query_row(
        "SELECT MAX(launched_at) FROM launch_events WHERE appimage_id = ?1",
        params![appimage_id],
        |row| row.get(0),
    )
}
/// All launch events for one AppImage, newest first.
pub fn get_launch_events(&self, appimage_id: i64) -> SqlResult<Vec<LaunchEvent>> {
    self.conn
        .prepare(
            "SELECT id, appimage_id, launched_at, source
             FROM launch_events WHERE appimage_id = ?1
             ORDER BY launched_at DESC",
        )?
        .query_map(params![appimage_id], |row| {
            Ok(LaunchEvent {
                id: row.get(0)?,
                appimage_id: row.get(1)?,
                launched_at: row.get(2)?,
                source: row.get(3)?,
            })
        })?
        .collect()
}
// --- Phase 2: Update history ---
/// Append one entry to the per-app update history (success or failure).
pub fn record_update(
    &self,
    appimage_id: i64,
    from_version: Option<&str>,
    to_version: Option<&str>,
    update_method: Option<&str>,
    download_size: Option<i64>,
    success: bool,
) -> SqlResult<()> {
    self.conn
        .execute(
            "INSERT INTO update_history
             (appimage_id, from_version, to_version, update_method, download_size, success)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
            params![appimage_id, from_version, to_version, update_method, download_size, success],
        )
        .map(|_| ())
}
// --- Phase 3: Security scanning ---
/// Remove all recorded bundled libraries for an AppImage (done before
/// a re-scan repopulates them).
pub fn clear_bundled_libraries(&self, appimage_id: i64) -> SqlResult<()> {
    self.conn
        .execute(
            "DELETE FROM bundled_libraries WHERE appimage_id = ?1",
            params![appimage_id],
        )
        .map(|_| ())
}
/// Record one shared library found inside an AppImage payload.
/// Returns the rowid of the inserted record.
pub fn insert_bundled_library(
    &self,
    appimage_id: i64,
    soname: &str,
    detected_name: Option<&str>,
    detected_version: Option<&str>,
    file_path: Option<&str>,
    file_size: i64,
) -> SqlResult<i64> {
    self.conn.execute(
        "INSERT INTO bundled_libraries
         (appimage_id, soname, detected_name, detected_version, file_path, file_size)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
        params![appimage_id, soname, detected_name, detected_version, file_path, file_size],
    )?;
    Ok(self.conn.last_insert_rowid())
}
/// All bundled libraries recorded for one AppImage, ordered by detected
/// name and then soname.
pub fn get_bundled_libraries(&self, appimage_id: i64) -> SqlResult<Vec<BundledLibraryRecord>> {
    self.conn
        .prepare(
            "SELECT id, appimage_id, soname, detected_name, detected_version, file_path, file_size
             FROM bundled_libraries WHERE appimage_id = ?1
             ORDER BY detected_name, soname",
        )?
        .query_map(params![appimage_id], |row| {
            Ok(BundledLibraryRecord {
                id: row.get(0)?,
                appimage_id: row.get(1)?,
                soname: row.get(2)?,
                detected_name: row.get(3)?,
                detected_version: row.get(4)?,
                file_path: row.get(5)?,
                file_size: row.get(6)?,
            })
        })?
        .collect()
}
/// Remove all recorded CVE matches for an AppImage before a re-scan.
pub fn clear_cve_matches(&self, appimage_id: i64) -> SqlResult<()> {
    self.conn
        .execute(
            "DELETE FROM cve_matches WHERE appimage_id = ?1",
            params![appimage_id],
        )
        .map(|_| ())
}
/// Record one CVE hit against a specific bundled library of an AppImage.
pub fn insert_cve_match(
    &self,
    appimage_id: i64,
    library_id: i64,
    cve_id: &str,
    severity: Option<&str>,
    cvss_score: Option<f64>,
    summary: Option<&str>,
    affected_versions: Option<&str>,
    fixed_version: Option<&str>,
) -> SqlResult<()> {
    self.conn
        .execute(
            "INSERT INTO cve_matches
             (appimage_id, library_id, cve_id, severity, cvss_score, summary, affected_versions, fixed_version)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
            params![appimage_id, library_id, cve_id, severity, cvss_score, summary, affected_versions, fixed_version],
        )
        .map(|_| ())
}
/// All CVE matches for one AppImage, joined with the bundled library
/// each match was found in, highest CVSS score first (NULL scores last).
pub fn get_cve_matches(&self, appimage_id: i64) -> SqlResult<Vec<CveMatchRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT cm.id, cm.appimage_id, cm.library_id, cm.cve_id, cm.severity,
                cm.cvss_score, cm.summary, cm.affected_versions, cm.fixed_version,
                bl.soname, bl.detected_name, bl.detected_version
         FROM cve_matches cm
         JOIN bundled_libraries bl ON bl.id = cm.library_id
         WHERE cm.appimage_id = ?1
         ORDER BY cm.cvss_score DESC NULLS LAST"
    )?;
    // Columns 0..=8 come from cve_matches, 9..=11 from the joined library.
    let rows = stmt.query_map(params![appimage_id], |row| {
        Ok(CveMatchRecord {
            id: row.get(0)?,
            appimage_id: row.get(1)?,
            library_id: row.get(2)?,
            cve_id: row.get(3)?,
            severity: row.get(4)?,
            cvss_score: row.get(5)?,
            summary: row.get(6)?,
            affected_versions: row.get(7)?,
            fixed_version: row.get(8)?,
            library_soname: row.get(9)?,
            library_name: row.get(10)?,
            library_version: row.get(11)?,
        })
    })?;
    rows.collect()
}
/// Per-severity CVE counts for one AppImage.
///
/// Severity labels are trimmed and upper-cased before bucketing, so
/// "critical" / "Critical" / "CRITICAL" all land in the critical bucket
/// (the previous exact-match dropped anything not already uppercase).
/// NULL severities count as MEDIUM; labels outside the four known
/// buckets are ignored.
pub fn get_cve_summary(&self, appimage_id: i64) -> SqlResult<CveSummary> {
    let mut summary = CveSummary { critical: 0, high: 0, medium: 0, low: 0 };
    let mut stmt = self.conn.prepare(
        "SELECT severity, COUNT(*) FROM cve_matches
         WHERE appimage_id = ?1 GROUP BY severity"
    )?;
    let rows = stmt.query_map(params![appimage_id], |row| {
        let severity: String = row.get::<_, Option<String>>(0)?
            .unwrap_or_else(|| "MEDIUM".to_string());
        Ok((severity, row.get::<_, i64>(1)?))
    })?;
    for row in rows {
        let (severity, count) = row?;
        // Accumulate (+=) rather than assign: after normalization two
        // distinct DB labels (e.g. "High" and "HIGH") can map to the
        // same bucket and must not clobber each other.
        match severity.trim().to_ascii_uppercase().as_str() {
            "CRITICAL" => summary.critical += count,
            "HIGH" => summary.high += count,
            "MEDIUM" => summary.medium += count,
            "LOW" => summary.low += count,
            _ => {}
        }
    }
    Ok(summary)
}
/// Per-severity CVE counts across every AppImage in the database.
///
/// Severity labels are trimmed and upper-cased before bucketing, so
/// mixed-case labels are no longer silently dropped. NULL severities
/// count as MEDIUM; labels outside the four known buckets are ignored.
pub fn get_all_cve_summary(&self) -> SqlResult<CveSummary> {
    let mut summary = CveSummary { critical: 0, high: 0, medium: 0, low: 0 };
    let mut stmt = self.conn.prepare(
        "SELECT severity, COUNT(*) FROM cve_matches GROUP BY severity"
    )?;
    let rows = stmt.query_map([], |row| {
        let severity: String = row.get::<_, Option<String>>(0)?
            .unwrap_or_else(|| "MEDIUM".to_string());
        Ok((severity, row.get::<_, i64>(1)?))
    })?;
    for row in rows {
        let (severity, count) = row?;
        // Accumulate: two distinct DB labels may normalize to one bucket.
        match severity.trim().to_ascii_uppercase().as_str() {
            "CRITICAL" => summary.critical += count,
            "HIGH" => summary.high += count,
            "MEDIUM" => summary.medium += count,
            "LOW" => summary.low += count,
            _ => {}
        }
    }
    Ok(summary)
}
// --- Phase 3: App data paths ---
/// Record a discovered data path for an AppImage. Duplicate paths are
/// ignored (`INSERT OR IGNORE`), so re-discovery is idempotent.
pub fn insert_app_data_path(
    &self,
    appimage_id: i64,
    path: &str,
    path_type: &str,
    discovery_method: &str,
    confidence: &str,
    size_bytes: i64,
) -> SqlResult<()> {
    self.conn
        .execute(
            "INSERT OR IGNORE INTO app_data_paths
             (appimage_id, path, path_type, discovery_method, confidence, size_bytes)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
            params![appimage_id, path, path_type, discovery_method, confidence, size_bytes],
        )
        .map(|_| ())
}
/// All discovered data paths for one AppImage, grouped by type then path.
pub fn get_app_data_paths(&self, appimage_id: i64) -> SqlResult<Vec<AppDataPathRecord>> {
    self.conn
        .prepare(
            "SELECT id, appimage_id, path, path_type, discovery_method, confidence, size_bytes
             FROM app_data_paths WHERE appimage_id = ?1
             ORDER BY path_type, path",
        )?
        .query_map(params![appimage_id], |row| {
            Ok(AppDataPathRecord {
                id: row.get(0)?,
                appimage_id: row.get(1)?,
                path: row.get(2)?,
                path_type: row.get(3)?,
                discovery_method: row.get(4)?,
                confidence: row.get(5)?,
                size_bytes: row.get(6)?,
            })
        })?
        .collect()
}
/// Remove all recorded data paths for an AppImage before re-discovery.
pub fn clear_app_data_paths(&self, appimage_id: i64) -> SqlResult<()> {
    self.conn
        .execute(
            "DELETE FROM app_data_paths WHERE appimage_id = ?1",
            params![appimage_id],
        )
        .map(|_| ())
}
/// Update history for one AppImage, newest entries first.
pub fn get_update_history(&self, appimage_id: i64) -> SqlResult<Vec<UpdateHistoryEntry>> {
    self.conn
        .prepare(
            "SELECT id, appimage_id, from_version, to_version, update_method,
                    download_size, updated_at, success
             FROM update_history WHERE appimage_id = ?1
             ORDER BY updated_at DESC",
        )?
        .query_map(params![appimage_id], |row| {
            Ok(UpdateHistoryEntry {
                id: row.get(0)?,
                appimage_id: row.get(1)?,
                from_version: row.get(2)?,
                to_version: row.get(3)?,
                update_method: row.get(4)?,
                download_size: row.get(5)?,
                updated_at: row.get(6)?,
                success: row.get(7)?,
            })
        })?
        .collect()
}
// --- Async analysis pipeline ---
/// Advance the async-analysis pipeline state string for an AppImage.
pub fn update_analysis_status(&self, id: i64, status: &str) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET analysis_status = ?2 WHERE id = ?1",
            params![id, status],
        )
        .map(|_| ())
}
// --- Phase 5: Runtime Wayland ---
/// Store the runtime-probed Wayland status and stamp the check time.
/// Note the parameter order: ?1 is the status, ?2 the row id.
pub fn update_runtime_wayland_status(&self, id: i64, status: &str) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET runtime_wayland_status = ?1, runtime_wayland_checked = datetime('now') WHERE id = ?2",
            params![status, id],
        )
        .map(|_| ())
}
// --- Phase 5: Config Backups ---
/// Record a new configuration backup archive; returns its rowid.
pub fn insert_config_backup(
    &self,
    appimage_id: i64,
    app_version: Option<&str>,
    archive_path: &str,
    archive_size: i64,
    checksum: Option<&str>,
    path_count: i32,
) -> SqlResult<i64> {
    self.conn.execute(
        "INSERT INTO config_backups (appimage_id, app_version, archive_path, archive_size, checksum, path_count)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
        params![appimage_id, app_version, archive_path, archive_size, checksum, path_count],
    )?;
    Ok(self.conn.last_insert_rowid())
}
/// Configuration backups for one AppImage, newest first.
pub fn get_config_backups(&self, appimage_id: i64) -> SqlResult<Vec<ConfigBackupRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, appimage_id, app_version, archive_path, archive_size, checksum,
                created_at, path_count, restored_count, last_restored_at
         FROM config_backups WHERE appimage_id = ?1 ORDER BY created_at DESC"
    )?;
    let rows = stmt.query_map(params![appimage_id], |row| {
        Ok(ConfigBackupRecord {
            id: row.get(0)?,
            appimage_id: row.get(1)?,
            app_version: row.get(2)?,
            archive_path: row.get(3)?,
            archive_size: row.get(4)?,
            checksum: row.get(5)?,
            created_at: row.get(6)?,
            path_count: row.get(7)?,
            restored_count: row.get(8)?,
            last_restored_at: row.get(9)?,
        })
    })?;
    rows.collect()
}
/// All configuration backups across every AppImage, newest first.
/// Same row mapping as `get_config_backups`, minus the id filter.
pub fn get_all_config_backups(&self) -> SqlResult<Vec<ConfigBackupRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, appimage_id, app_version, archive_path, archive_size, checksum,
                created_at, path_count, restored_count, last_restored_at
         FROM config_backups ORDER BY created_at DESC"
    )?;
    let rows = stmt.query_map([], |row| {
        Ok(ConfigBackupRecord {
            id: row.get(0)?,
            appimage_id: row.get(1)?,
            app_version: row.get(2)?,
            archive_path: row.get(3)?,
            archive_size: row.get(4)?,
            checksum: row.get(5)?,
            created_at: row.get(6)?,
            path_count: row.get(7)?,
            restored_count: row.get(8)?,
            last_restored_at: row.get(9)?,
        })
    })?;
    rows.collect()
}
/// Delete one configuration-backup record by id (the archive file on
/// disk is not touched here).
pub fn delete_config_backup(&self, backup_id: i64) -> SqlResult<()> {
    self.conn
        .execute("DELETE FROM config_backups WHERE id = ?1", params![backup_id])
        .map(|_| ())
}
// --- Phase 5: CVE Notifications ---
/// Whether the user has already been notified about this CVE for this app.
pub fn has_cve_been_notified(&self, appimage_id: i64, cve_id: &str) -> SqlResult<bool> {
    self.conn
        .query_row(
            "SELECT COUNT(*) FROM cve_notifications WHERE appimage_id = ?1 AND cve_id = ?2",
            params![appimage_id, cve_id],
            |row| row.get::<_, i32>(0),
        )
        .map(|hits| hits > 0)
}
/// Remember that the user was notified about a CVE for this app.
/// `INSERT OR IGNORE` makes repeated calls idempotent.
pub fn mark_cve_notified(
    &self,
    appimage_id: i64,
    cve_id: &str,
    severity: &str,
) -> SqlResult<()> {
    self.conn
        .execute(
            "INSERT OR IGNORE INTO cve_notifications (appimage_id, cve_id, severity)
             VALUES (?1, ?2, ?3)",
            params![appimage_id, cve_id, severity],
        )
        .map(|_| ())
}
// --- Phase 5: Sandbox Profiles ---
/// Store a sandbox profile (local or registry-sourced); returns its rowid.
pub fn insert_sandbox_profile(
    &self,
    app_name: &str,
    profile_version: Option<&str>,
    author: Option<&str>,
    description: Option<&str>,
    content: &str,
    source: &str,
    registry_id: Option<&str>,
) -> SqlResult<i64> {
    self.conn.execute(
        "INSERT INTO sandbox_profiles (app_name, profile_version, author, description, content, source, registry_id)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
        params![app_name, profile_version, author, description, content, source, registry_id],
    )?;
    Ok(self.conn.last_insert_rowid())
}
/// Most recently created sandbox profile for an app name, if any.
pub fn get_sandbox_profile_for_app(&self, app_name: &str) -> SqlResult<Option<SandboxProfileRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, app_name, profile_version, author, description, content, source, registry_id, created_at
         FROM sandbox_profiles WHERE app_name = ?1 ORDER BY created_at DESC LIMIT 1",
    )?;
    let mut found = stmt.query_map(params![app_name], |row| {
        Ok(SandboxProfileRecord {
            id: row.get(0)?,
            app_name: row.get(1)?,
            profile_version: row.get(2)?,
            author: row.get(3)?,
            description: row.get(4)?,
            content: row.get(5)?,
            source: row.get(6)?,
            registry_id: row.get(7)?,
            created_at: row.get(8)?,
        })
    })?;
    match found.next() {
        Some(record) => record.map(Some),
        None => Ok(None),
    }
}
/// Delete one sandbox profile by id.
pub fn delete_sandbox_profile(&self, profile_id: i64) -> SqlResult<()> {
    self.conn
        .execute(
            "DELETE FROM sandbox_profiles WHERE id = ?1",
            params![profile_id],
        )
        .map(|_| ())
}
/// Every stored sandbox profile, sorted by app name.
pub fn get_all_sandbox_profiles(&self) -> SqlResult<Vec<SandboxProfileRecord>> {
    self.conn
        .prepare(
            "SELECT id, app_name, profile_version, author, description, content, source, registry_id, created_at
             FROM sandbox_profiles ORDER BY app_name ASC",
        )?
        .query_map([], |row| {
            Ok(SandboxProfileRecord {
                id: row.get(0)?,
                app_name: row.get(1)?,
                profile_version: row.get(2)?,
                author: row.get(3)?,
                description: row.get(4)?,
                content: row.get(5)?,
                source: row.get(6)?,
                registry_id: row.get(7)?,
                created_at: row.get(8)?,
            })
        })?
        .collect()
}
// --- Phase 6: Tags, Pin, Startup Time ---
/// Set or clear (`None`) the comma-separated tag string for an AppImage.
pub fn update_tags(&self, id: i64, tags: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET tags = ?2 WHERE id = ?1",
            params![id, tags],
        )
        .map(|_| ())
}
/// Get all distinct tags used across all installed apps.
/// Returns a sorted, deduplicated list of tag strings.
///
/// Tags are stored per-app as one comma-separated string; fragments are
/// trimmed and empty pieces (from stray commas) skipped. Row-decoding
/// errors are now propagated via `?` instead of being silently dropped —
/// the previous `if let Ok(..)` swallowed them even though the function
/// already returns `SqlResult`.
pub fn get_all_tags(&self) -> SqlResult<Vec<String>> {
    let mut stmt = self.conn.prepare(
        "SELECT tags FROM appimages WHERE tags IS NOT NULL AND tags != ''"
    )?;
    let rows = stmt.query_map([], |row| row.get::<_, String>(0))?;
    // BTreeSet gives dedup + sorted order in one structure.
    let mut tag_set = std::collections::BTreeSet::new();
    for row in rows {
        let tags_str = row?;
        for tag in tags_str.split(',') {
            let trimmed = tag.trim();
            if !trimmed.is_empty() {
                tag_set.insert(trimmed.to_string());
            }
        }
    }
    Ok(tag_set.into_iter().collect())
}
/// Pin or unpin an AppImage in the UI ordering.
pub fn set_pinned(&self, id: i64, pinned: bool) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET pinned = ?2 WHERE id = ?1",
            params![id, pinned],
        )
        .map(|_| ())
}
/// Store the rolling average startup time (milliseconds) for an AppImage.
pub fn update_avg_startup_ms(&self, id: i64, ms: i64) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET avg_startup_ms = ?2 WHERE id = ?1",
            params![id, ms],
        )
        .map(|_| ())
}
/// Daily launch counts over the last `days` days as (day, count) pairs,
/// oldest day first. With `id = Some(..)` only that app's launches are
/// counted; with `None`, launches across all apps.
///
/// The two branches run structurally identical queries; only the
/// appimage_id filter (and hence the parameter positions) differ.
pub fn get_launch_history_daily(&self, id: Option<i64>, days: i32) -> SqlResult<Vec<(String, i64)>> {
    // SQLite datetime modifier, e.g. "-30 days".
    let days_param = format!("-{} days", days);
    if let Some(app_id) = id {
        let mut stmt = self.conn.prepare(
            "SELECT date(launched_at) as day, COUNT(*) as cnt
             FROM launch_events
             WHERE appimage_id = ?1 AND launched_at >= datetime('now', ?2)
             GROUP BY day ORDER BY day"
        )?;
        let rows = stmt.query_map(params![app_id, days_param], |row| {
            Ok((row.get::<_, String>(0)?, row.get::<_, i64>(1)?))
        })?;
        rows.collect()
    } else {
        let mut stmt = self.conn.prepare(
            "SELECT date(launched_at) as day, COUNT(*) as cnt
             FROM launch_events
             WHERE launched_at >= datetime('now', ?1)
             GROUP BY day ORDER BY day"
        )?;
        let rows = stmt.query_map(params![days_param], |row| {
            Ok((row.get::<_, String>(0)?, row.get::<_, i64>(1)?))
        })?;
        rows.collect()
    }
}
// --- Phase 5: Runtime Updates ---
/// Record one runtime-replacement attempt for an AppImage; returns the
/// rowid of the new history entry.
pub fn record_runtime_update(
    &self,
    appimage_id: i64,
    old_runtime: Option<&str>,
    new_runtime: Option<&str>,
    backup_path: Option<&str>,
    success: bool,
) -> SqlResult<i64> {
    self.conn.execute(
        "INSERT INTO runtime_updates (appimage_id, old_runtime, new_runtime, backup_path, success)
         VALUES (?1, ?2, ?3, ?4, ?5)",
        params![appimage_id, old_runtime, new_runtime, backup_path, success as i32],
    )?;
    Ok(self.conn.last_insert_rowid())
}
// --- Autostart ---
/// Enable or disable the autostart flag for an AppImage.
pub fn set_autostart(&self, id: i64, enabled: bool) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET autostart = ?2 WHERE id = ?1",
            params![id, enabled as i32],
        )
        .map(|_| ())
}
/// Set or clear (`None`) the StartupWMClass hint for an AppImage.
pub fn set_startup_wm_class(&self, id: i64, wm_class: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET startup_wm_class = ?2 WHERE id = ?1",
            params![id, wm_class],
        )
        .map(|_| ())
}
/// Store the signature/checksum verification status string.
pub fn set_verification_status(&self, id: i64, status: &str) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET verification_status = ?2 WHERE id = ?1",
            params![id, status],
        )
        .map(|_| ())
}
/// Remember whether the first-run prompt was already shown for this app.
pub fn set_first_run_prompted(&self, id: i64, prompted: bool) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET first_run_prompted = ?2 WHERE id = ?1",
            params![id, prompted as i32],
        )
        .map(|_| ())
}
/// Mark whether an AppImage is installed system-wide (vs. per-user).
pub fn set_system_wide(&self, id: i64, system_wide: bool) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET system_wide = ?2 WHERE id = ?1",
            params![id, system_wide as i32],
        )
        .map(|_| ())
}
/// Mark an AppImage as portable and record (or clear) its mount point.
pub fn set_portable(&self, id: i64, portable: bool, mount_point: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET is_portable = ?2, mount_point = ?3 WHERE id = ?1",
            params![id, portable as i32, mount_point],
        )
        .map(|_| ())
}
// --- Launch statistics ---
/// The `limit` most-launched apps as (app name, launch count) pairs,
/// most launches first. Apps with a NULL name are reported as "Unknown".
///
/// NOTE(review): the query groups by l.appimage_id while selecting the
/// ungrouped a.app_name — valid in SQLite (each group has exactly one
/// joined appimages row), but non-portable SQL.
pub fn get_top_launched(&self, limit: i32) -> SqlResult<Vec<(String, u64)>> {
    let mut stmt = self.conn.prepare(
        "SELECT a.app_name, COUNT(l.id) as cnt
         FROM launch_events l
         JOIN appimages a ON a.id = l.appimage_id
         GROUP BY l.appimage_id
         ORDER BY cnt DESC
         LIMIT ?1"
    )?;
    let rows = stmt.query_map(params![limit], |row| {
        Ok((
            row.get::<_, Option<String>>(0)?.unwrap_or_else(|| "Unknown".to_string()),
            row.get::<_, u64>(1)?,
        ))
    })?;
    rows.collect()
}
/// Total number of launch events recorded across all AppImages.
pub fn get_total_launch_count(&self) -> SqlResult<u64> {
    let total: u64 =
        self.conn
            .query_row("SELECT COUNT(*) FROM launch_events", [], |row| row.get(0))?;
    Ok(total)
}
/// The most recent launch as (app name, timestamp); `None` when no
/// launch has ever been recorded.
pub fn get_last_launch(&self) -> SqlResult<Option<(String, String)>> {
    let lookup = self.conn.query_row(
        "SELECT a.app_name, l.launched_at
         FROM launch_events l
         JOIN appimages a ON a.id = l.appimage_id
         ORDER BY l.launched_at DESC
         LIMIT 1",
        [],
        |row| {
            let name: Option<String> = row.get(0)?;
            let launched_at: String = row.get(1)?;
            Ok((name.unwrap_or_else(|| "Unknown".to_string()), launched_at))
        },
    );
    match lookup {
        Ok(pair) => Ok(Some(pair)),
        // An empty launch_events table is a normal state, not an error.
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e),
    }
}
// --- Source URL ---
/// Record (or clear, when `None`) the upstream source URL for an AppImage.
pub fn set_source_url(&self, id: i64, url: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET source_url = ?2 WHERE id = ?1",
            params![id, url],
        )
        .map(|_| ())
}
// --- Version rollback ---
/// Record (or clear, when `None`) the path of the backed-up previous
/// version, used for rollback.
pub fn set_previous_version(&self, id: i64, path: Option<&str>) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE appimages SET previous_version_path = ?2 WHERE id = ?1",
            params![id, path],
        )
        .map(|_| ())
}
pub fn get_previous_version(&self, id: i64) -> SqlResult<Option<String>> {
self.conn.query_row(
"SELECT previous_version_path FROM appimages WHERE id = ?1",
params![id],
|row| row.get(0),
)
}
// --- Similar apps ---
/// Find AppImages from the user's library that share categories with the given app.
pub fn find_similar_apps(
&self,
categories: &str,
exclude_id: i64,
limit: i32,
) -> SqlResult<Vec<(i64, String, Option<String>)>> {
// Split categories and match any overlap
let cats: Vec<&str> = categories.split(';').filter(|s| !s.is_empty()).collect();
if cats.is_empty() {
return Ok(Vec::new());
}
// Build LIKE conditions for each category
let conditions: Vec<String> = cats.iter()
.map(|c| format!("categories LIKE '%{}%'", c.replace('\'', "''")))
.collect();
let where_clause = conditions.join(" OR ");
let sql = format!(
"SELECT id, COALESCE(app_name, filename) AS name, icon_path
FROM appimages
WHERE id != ?1 AND ({})
LIMIT ?2",
where_clause
);
let mut stmt = self.conn.prepare(&sql)?;
let rows = stmt.query_map(params![exclude_id, limit], |row| {
Ok((
row.get::<_, i64>(0)?,
row.get::<_, String>(1)?,
row.get::<_, Option<String>>(2)?,
))
})?;
let mut results = Vec::new();
for row in rows {
results.push(row?);
}
Ok(results)
}
// --- System modification tracking ---
/// Record a file-system modification made on behalf of an AppImage and
/// return the new row id (via SQLite's RETURNING clause, so no separate
/// last_insert_rowid round-trip is needed).
pub fn register_modification(
    &self,
    appimage_id: i64,
    mod_type: &str,
    file_path: &str,
    previous_value: Option<&str>,
) -> SqlResult<i64> {
    let sql = "INSERT INTO system_modifications (appimage_id, mod_type, file_path, previous_value)
         VALUES (?1, ?2, ?3, ?4)
         RETURNING id";
    self.conn.query_row(
        sql,
        params![appimage_id, mod_type, file_path, previous_value],
        |row| row.get(0),
    )
}
/// All tracked system modifications for one AppImage, newest first.
pub fn get_modifications(&self, appimage_id: i64) -> SqlResult<Vec<SystemModification>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, mod_type, file_path, previous_value
         FROM system_modifications
         WHERE appimage_id = ?1
         ORDER BY id DESC",
    )?;
    stmt.query_map(params![appimage_id], |row| {
        Ok(SystemModification {
            id: row.get(0)?,
            mod_type: row.get(1)?,
            file_path: row.get(2)?,
            previous_value: row.get(3)?,
        })
    })?
    .collect()
}
/// Delete a single system-modification record by id.
pub fn remove_modification(&self, id: i64) -> SqlResult<()> {
    self.conn
        .execute("DELETE FROM system_modifications WHERE id = ?1", params![id])
        .map(|_| ())
}
// --- Catalog methods ---
/// Insert a catalog source, or just refresh its last_synced timestamp if
/// the URL already exists, then return that URL's row id.
pub fn upsert_catalog_source(
    &self,
    name: &str,
    url: &str,
    source_type: &str,
) -> SqlResult<i64> {
    self.conn.execute(
        "INSERT INTO catalog_sources (name, url, source_type, last_synced)
         VALUES (?1, ?2, ?3, datetime('now'))
         ON CONFLICT(url) DO UPDATE SET last_synced = datetime('now')",
        params![name, url, source_type],
    )?;
    let source_id: i64 = self.conn.query_row(
        "SELECT id FROM catalog_sources WHERE url = ?1",
        params![url],
        |row| row.get(0),
    )?;
    Ok(source_id)
}
/// Insert or refresh a catalog app keyed on (source_id, name) and return
/// its row id. On conflict, COALESCE keeps previously-known values when
/// the incoming sync supplies NULL for an optional field.
pub fn upsert_catalog_app(
    &self,
    source_id: i64,
    name: &str,
    description: Option<&str>,
    categories: Option<&str>,
    download_url: &str,
    icon_url: Option<&str>,
    homepage: Option<&str>,
    license: Option<&str>,
) -> SqlResult<i64> {
    self.conn.execute(
        "INSERT INTO catalog_apps (source_id, name, description, categories, download_url, icon_url, homepage, cached_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, datetime('now'))
         ON CONFLICT(source_id, name) DO UPDATE SET
         description = COALESCE(?3, description),
         categories = COALESCE(?4, categories),
         download_url = ?5,
         icon_url = COALESCE(?6, icon_url),
         homepage = COALESCE(?7, homepage),
         cached_at = datetime('now')",
        params![source_id, name, description, categories, download_url, icon_url, homepage],
    )?;
    // HACK: the license is stashed in the spare `architecture` column of
    // catalog_apps until the schema grows a dedicated license column.
    if let Some(license_text) = license {
        self.conn.execute(
            "UPDATE catalog_apps SET architecture = ?1 WHERE source_id = ?2 AND name = ?3",
            params![license_text, source_id, name],
        )?;
    }
    let app_id: i64 = self.conn.query_row(
        "SELECT id FROM catalog_apps WHERE source_id = ?1 AND name = ?2",
        params![source_id, name],
        |row| row.get(0),
    )?;
    Ok(app_id)
}
/// Search the deduplicated catalog by free-text query and/or category,
/// with paging (`limit`/`offset`) and the requested sort order.
pub fn search_catalog(
    &self,
    query: &str,
    category: Option<&str>,
    limit: i32,
    offset: i32,
    sort: CatalogSortOrder,
) -> SqlResult<Vec<CatalogApp>> {
    let mut sql = format!(
        "SELECT {} FROM catalog_apps WHERE 1=1 {}",
        Self::CATALOG_APP_COLUMNS,
        Self::CATALOG_DEDUP_FILTER,
    );
    // Placeholders are numbered in the order they are appended, so the
    // boxed-parameter vector order must mirror the SQL.
    let mut bound: Vec<Box<dyn rusqlite::types::ToSql>> = Vec::new();
    if !query.is_empty() {
        sql.push_str(" AND (name LIKE ?1 OR description LIKE ?1 OR ocs_summary LIKE ?1 OR ocs_description LIKE ?1)");
        bound.push(Box::new(format!("%{}%", query)));
    }
    if let Some(cat) = category {
        sql.push_str(&format!(" AND categories LIKE ?{}", bound.len() + 1));
        bound.push(Box::new(format!("%{}%", cat)));
    }
    sql.push_str(&format!(" {} LIMIT {} OFFSET {}", sort.sql_clause(), limit, offset));
    let refs: Vec<&dyn rusqlite::types::ToSql> = bound.iter().map(|p| p.as_ref()).collect();
    let mut stmt = self.conn.prepare(&sql)?;
    stmt.query_map(refs.as_slice(), Self::catalog_app_from_row)?
        .collect()
}
/// Count catalog apps matching the same filters as `search_catalog`
/// (used for pagination).
pub fn count_catalog_matches(
    &self,
    query: &str,
    category: Option<&str>,
) -> SqlResult<i32> {
    let mut sql = format!(
        "SELECT COUNT(*) FROM catalog_apps WHERE 1=1 {}",
        Self::CATALOG_DEDUP_FILTER,
    );
    let mut bound: Vec<Box<dyn rusqlite::types::ToSql>> = Vec::new();
    if !query.is_empty() {
        sql.push_str(" AND (name LIKE ?1 OR description LIKE ?1 OR ocs_summary LIKE ?1 OR ocs_description LIKE ?1)");
        bound.push(Box::new(format!("%{}%", query)));
    }
    if let Some(cat) = category {
        sql.push_str(&format!(" AND categories LIKE ?{}", bound.len() + 1));
        bound.push(Box::new(format!("%{}%", cat)));
    }
    let refs: Vec<&dyn rusqlite::types::ToSql> = bound.iter().map(|p| p.as_ref()).collect();
    self.conn.query_row(&sql, refs.as_slice(), |row| row.get(0))
}
/// Fetch one catalog app by row id; `None` when the id is unknown.
pub fn get_catalog_app(&self, id: i64) -> SqlResult<Option<CatalogApp>> {
    let sql = format!(
        "SELECT {} FROM catalog_apps WHERE id = ?1",
        Self::CATALOG_APP_COLUMNS
    );
    match self.conn.query_row(&sql, params![id], Self::catalog_app_from_row) {
        Ok(app) => Ok(Some(app)),
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e),
    }
}
/// Release-history JSON from the catalog for an installed app, matched
/// case-insensitively by name. Returns `None` when no catalog entry with
/// a non-NULL history matches.
pub fn get_catalog_release_history_by_name(&self, app_name: &str) -> SqlResult<Option<String>> {
    match self.conn.query_row(
        "SELECT release_history FROM catalog_apps
         WHERE LOWER(name) = LOWER(?1) AND release_history IS NOT NULL
         LIMIT 1",
        params![app_name],
        |row| row.get::<_, Option<String>>(0),
    ) {
        Ok(history) => Ok(history),
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e),
    }
}
/// Featured catalog apps for the carousel: candidates must have an icon,
/// some description text, and at least one screenshot/preview. The top
/// popular pool is shuffled deterministically (rotating every 15 minutes)
/// by `shuffle_featured_pool`.
pub fn get_featured_catalog_apps(&self, limit: i32) -> SqlResult<Vec<CatalogApp>> {
    let sql = format!(
        "SELECT {} FROM catalog_apps
         WHERE icon_url IS NOT NULL AND icon_url != ''
         AND (description IS NOT NULL AND description != ''
         OR ocs_summary IS NOT NULL AND ocs_summary != '')
         AND (screenshots IS NOT NULL AND screenshots != ''
         OR ocs_preview_url IS NOT NULL AND ocs_preview_url != '')
         {}",
        Self::CATALOG_APP_COLUMNS,
        Self::CATALOG_DEDUP_FILTER,
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let pool = stmt
        .query_map([], Self::catalog_app_from_row)?
        .collect::<SqlResult<Vec<_>>>()?;
    Self::shuffle_featured_pool(pool, limit)
}
/// Same as `get_featured_catalog_apps`, restricted to apps whose
/// categories contain the given substring.
pub fn get_featured_catalog_apps_by_category(&self, limit: i32, category: &str) -> SqlResult<Vec<CatalogApp>> {
    let sql = format!(
        "SELECT {} FROM catalog_apps
         WHERE icon_url IS NOT NULL AND icon_url != ''
         AND (description IS NOT NULL AND description != ''
         OR ocs_summary IS NOT NULL AND ocs_summary != '')
         AND (screenshots IS NOT NULL AND screenshots != ''
         OR ocs_preview_url IS NOT NULL AND ocs_preview_url != '')
         AND categories LIKE ?1
         {}",
        Self::CATALOG_APP_COLUMNS,
        Self::CATALOG_DEDUP_FILTER,
    );
    let pattern = format!("%{}%", category);
    let mut stmt = self.conn.prepare(&sql)?;
    let pool = stmt
        .query_map(params![pattern], Self::catalog_app_from_row)?
        .collect::<SqlResult<Vec<_>>>()?;
    Self::shuffle_featured_pool(pool, limit)
}
/// Sort apps by popularity, take the top pool_size, then deterministically
/// shuffle the pool using a time seed that rotates every 15 minutes.
fn shuffle_featured_pool(mut apps: Vec<CatalogApp>, limit: i32) -> SqlResult<Vec<CatalogApp>> {
const POOL_SIZE: usize = 50;
// Time seed rotates every 15 minutes (900 seconds)
let time_seed = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() / 900;
// Sort by combined popularity to find the top pool
apps.sort_by(|a, b| {
let a_pop = a.ocs_downloads.unwrap_or(0) + a.github_stars.unwrap_or(0);
let b_pop = b.ocs_downloads.unwrap_or(0) + b.github_stars.unwrap_or(0);
b_pop.cmp(&a_pop)
});
// Take the top pool, then shuffle deterministically
apps.truncate(POOL_SIZE);
apps.sort_by(|a, b| {
let ha = (a.id as u64 ^ time_seed).wrapping_mul(0x517cc1b727220a95);
let hb = (b.id as u64 ^ time_seed).wrapping_mul(0x517cc1b727220a95);
ha.cmp(&hb)
});
apps.truncate(limit as usize);
Ok(apps)
}
/// Histogram of category names across the deduplicated catalog,
/// sorted by descending occurrence count.
pub fn get_catalog_categories(&self) -> SqlResult<Vec<(String, u32)>> {
    let sql = format!(
        "SELECT categories FROM catalog_apps WHERE categories IS NOT NULL AND categories != '' {}",
        Self::CATALOG_DEDUP_FILTER,
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let mut counts: std::collections::HashMap<String, u32> = std::collections::HashMap::new();
    // `flatten` silently skips rows that fail to decode, matching the
    // previous `if let Ok(...)` behavior.
    for cats_str in stmt.query_map([], |row| row.get::<_, String>(0))?.flatten() {
        for cat in cats_str.split(';').filter(|c| !c.is_empty()) {
            *counts.entry(cat.to_string()).or_insert(0u32) += 1;
        }
    }
    let mut histogram: Vec<(String, u32)> = counts.into_iter().collect();
    histogram.sort_by(|a, b| b.1.cmp(&a.1));
    Ok(histogram)
}
/// Number of catalog apps remaining after the dedup filter.
pub fn catalog_app_count(&self) -> SqlResult<i64> {
    let sql = format!(
        "SELECT COUNT(*) FROM catalog_apps WHERE 1=1 {}",
        Self::CATALOG_DEDUP_FILTER,
    );
    let total: i64 = self.conn.query_row(&sql, [], |row| row.get(0))?;
    Ok(total)
}
/// Insert or refresh a full catalog-app record keyed on (source_id, name).
/// On conflict every optional field is COALESCEd so a sync that omits a
/// value never wipes previously-known data; `download_url` always wins.
pub fn insert_catalog_app(
    &self,
    source_id: i64,
    name: &str,
    description: Option<&str>,
    categories: Option<&str>,
    latest_version: Option<&str>,
    download_url: &str,
    icon_url: Option<&str>,
    homepage: Option<&str>,
    file_size: Option<i64>,
    architecture: Option<&str>,
    screenshots: Option<&str>,
    license: Option<&str>,
) -> SqlResult<()> {
    self.conn
        .execute(
            "INSERT INTO catalog_apps
             (source_id, name, description, categories, latest_version, download_url, icon_url, homepage, file_size, architecture, screenshots, license, cached_at)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, datetime('now'))
             ON CONFLICT(source_id, name) DO UPDATE SET
             description = COALESCE(excluded.description, description),
             categories = COALESCE(excluded.categories, categories),
             latest_version = COALESCE(excluded.latest_version, latest_version),
             download_url = excluded.download_url,
             icon_url = COALESCE(excluded.icon_url, icon_url),
             homepage = COALESCE(excluded.homepage, homepage),
             file_size = COALESCE(excluded.file_size, file_size),
             architecture = COALESCE(excluded.architecture, architecture),
             screenshots = COALESCE(excluded.screenshots, screenshots),
             license = COALESCE(excluded.license, license),
             cached_at = datetime('now')",
            params![source_id, name, description, categories, latest_version, download_url, icon_url, homepage, file_size, architecture, screenshots, license],
        )
        .map(|_| ())
}
/// Insert or refresh an OCS-sourced catalog app, keyed on (source_id, name).
///
/// NOTE on column mapping (deliberate, easy to misread when scanning the
/// positional params below):
/// - the generic `description` column stores the short `summary`, while
///   the full `description` argument goes into `ocs_description`;
/// - `summary` is additionally stored in `ocs_summary`;
/// - `version` is stored in both `latest_version` and `ocs_version`.
///
/// Unlike `insert_catalog_app`, the conflict branch overwrites every
/// field unconditionally (no COALESCE): OCS is treated as authoritative.
pub fn insert_ocs_catalog_app(
    &self,
    source_id: i64,
    name: &str,
    ocs_id: i64,
    description: Option<&str>,
    summary: Option<&str>,
    categories: Option<&str>,
    version: Option<&str>,
    download_url: &str,
    icon_url: Option<&str>,
    homepage: Option<&str>,
    screenshots: Option<&str>,
    ocs_downloads: Option<i64>,
    ocs_score: Option<i64>,
    ocs_typename: Option<&str>,
    ocs_personid: Option<&str>,
    ocs_tags: Option<&str>,
    ocs_changed: Option<&str>,
    ocs_preview_url: Option<&str>,
    license: Option<&str>,
    ocs_detailpage: Option<&str>,
    ocs_created: Option<&str>,
    ocs_downloadname: Option<&str>,
    ocs_downloadsize: Option<i64>,
    ocs_arch: Option<&str>,
    ocs_md5sum: Option<&str>,
    ocs_comments: Option<i64>,
) -> SqlResult<()> {
    self.conn.execute(
        "INSERT INTO catalog_apps
         (source_id, name, description, categories, latest_version, download_url, icon_url, homepage,
         screenshots, license, cached_at,
         ocs_id, ocs_downloads, ocs_score, ocs_typename, ocs_personid, ocs_description, ocs_summary,
         ocs_version, ocs_tags, ocs_changed, ocs_preview_url,
         ocs_detailpage, ocs_created, ocs_downloadname, ocs_downloadsize, ocs_arch, ocs_md5sum, ocs_comments)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, datetime('now'),
         ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21,
         ?22, ?23, ?24, ?25, ?26, ?27, ?28)
         ON CONFLICT(source_id, name) DO UPDATE SET
         description = excluded.description,
         categories = excluded.categories,
         latest_version = excluded.latest_version,
         download_url = excluded.download_url,
         icon_url = excluded.icon_url,
         homepage = excluded.homepage,
         screenshots = excluded.screenshots,
         license = excluded.license,
         cached_at = datetime('now'),
         ocs_id = excluded.ocs_id,
         ocs_downloads = excluded.ocs_downloads,
         ocs_score = excluded.ocs_score,
         ocs_typename = excluded.ocs_typename,
         ocs_personid = excluded.ocs_personid,
         ocs_description = excluded.ocs_description,
         ocs_summary = excluded.ocs_summary,
         ocs_version = excluded.ocs_version,
         ocs_tags = excluded.ocs_tags,
         ocs_changed = excluded.ocs_changed,
         ocs_preview_url = excluded.ocs_preview_url,
         ocs_detailpage = excluded.ocs_detailpage,
         ocs_created = excluded.ocs_created,
         ocs_downloadname = excluded.ocs_downloadname,
         ocs_downloadsize = excluded.ocs_downloadsize,
         ocs_arch = excluded.ocs_arch,
         ocs_md5sum = excluded.ocs_md5sum,
         ocs_comments = excluded.ocs_comments",
        params![
            // Positional order must match ?1..?28 above. Note `summary`
            // binds to ?3 (description column) and `description` to ?16
            // (ocs_description) — see the doc comment on this method.
            source_id, name, summary, categories, version, download_url, icon_url, homepage,
            screenshots, license,
            ocs_id, ocs_downloads, ocs_score, ocs_typename, ocs_personid, description, summary,
            version, ocs_tags, ocs_changed, ocs_preview_url,
            ocs_detailpage, ocs_created, ocs_downloadname, ocs_downloadsize, ocs_arch, ocs_md5sum, ocs_comments
        ],
    )?;
    Ok(())
}
/// Delete rows from a secondary source whose (case-insensitive) name also
/// exists in the OCS source, returning how many rows were removed.
/// This cleans up duplicates from before OCS became the primary source.
pub fn delete_secondary_duplicates(&self, secondary_source_id: i64) -> SqlResult<usize> {
    let sql = "DELETE FROM catalog_apps
         WHERE source_id = ?1
         AND LOWER(name) IN (
             SELECT LOWER(name) FROM catalog_apps
             WHERE source_id != ?1 AND ocs_id IS NOT NULL
         )";
    self.conn.execute(sql, params![secondary_source_id])
}
/// Lower-cased names of every catalog app belonging to one source
/// (used for deduplication across sources).
pub fn get_catalog_app_names_for_source(&self, source_id: i64) -> SqlResult<std::collections::HashSet<String>> {
    let mut stmt = self.conn.prepare(
        "SELECT LOWER(name) FROM catalog_apps WHERE source_id = ?1",
    )?;
    // Result<HashSet<_>, _> collects and short-circuits on the first error.
    stmt.query_map(params![source_id], |row| row.get::<_, String>(0))?
        .collect()
}
/// Simple LIKE search over catalog name/description, capped at 50 rows
/// ordered by name.
pub fn search_catalog_apps(&self, query: &str) -> SqlResult<Vec<CatalogAppRecord>> {
    let pattern = format!("%{}%", query);
    let mut stmt = self.conn.prepare(
        "SELECT name, description, categories, latest_version, download_url, icon_url, homepage, file_size, architecture
         FROM catalog_apps
         WHERE name LIKE ?1 OR description LIKE ?1
         ORDER BY name
         LIMIT 50",
    )?;
    stmt.query_map(params![pattern], |row| {
        Ok(CatalogAppRecord {
            name: row.get(0)?,
            description: row.get(1)?,
            categories: row.get(2)?,
            latest_version: row.get(3)?,
            download_url: row.get(4)?,
            icon_url: row.get(5)?,
            homepage: row.get(6)?,
            file_size: row.get(7)?,
            architecture: row.get(8)?,
        })
    })?
    .collect()
}
/// Stamp a catalog source as just-synced and record its app count.
pub fn update_catalog_source_sync(&self, source_id: i64, app_count: i32) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE catalog_sources SET last_synced = datetime('now'), app_count = ?2 WHERE id = ?1",
            params![source_id, app_count],
        )
        .map(|_| ())
}
/// All configured catalog sources, alphabetical by name.
pub fn get_catalog_sources(&self) -> SqlResult<Vec<CatalogSourceRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, name, url, source_type, enabled, last_synced, app_count FROM catalog_sources ORDER BY name",
    )?;
    stmt.query_map([], |row| {
        // A missing/NULL `enabled` flag is treated as enabled.
        let enabled_flag = row.get::<_, i32>(4).unwrap_or(1);
        Ok(CatalogSourceRecord {
            id: row.get(0)?,
            name: row.get(1)?,
            url: row.get(2)?,
            source_type: row.get(3)?,
            enabled: enabled_flag != 0,
            last_synced: row.get(5)?,
            app_count: row.get(6)?,
        })
    })?
    .collect()
}
/// Row id of the catalog app identified by source and exact name, or
/// `None` when no such row exists.
pub fn get_catalog_app_by_source_and_name(&self, source_id: i64, name: &str) -> SqlResult<Option<i64>> {
    match self.conn.query_row(
        "SELECT id FROM catalog_apps WHERE source_id = ?1 AND name = ?2",
        params![source_id, name],
        |row| row.get(0),
    ) {
        Ok(id) => Ok(Some(id)),
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e),
    }
}
// --- GitHub enrichment methods ---
/// Associate a catalog app with its GitHub owner/repo pair.
pub fn update_catalog_app_github_repo(
    &self,
    app_id: i64,
    owner: &str,
    repo: &str,
) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE catalog_apps SET github_owner = ?2, github_repo = ?3 WHERE id = ?1",
            params![app_id, owner, repo],
        )
        .map(|_| ())
}
/// Store GitHub stars/description for a catalog app and refresh the
/// enrichment timestamp. The repo's `pushed_at` only seeds `release_date`
/// when no release info has been recorded yet.
pub fn update_catalog_app_github_metadata(
    &self,
    app_id: i64,
    stars: i64,
    pushed_at: Option<&str>,
    description: Option<&str>,
) -> SqlResult<()> {
    self.conn.execute(
        "UPDATE catalog_apps SET github_stars = ?2, github_description = COALESCE(?3, github_description), github_enriched_at = datetime('now') WHERE id = ?1",
        params![app_id, stars, description],
    )?;
    if let Some(pushed_timestamp) = pushed_at {
        // COALESCE keeps an existing release_date; pushed_at is a fallback.
        self.conn.execute(
            "UPDATE catalog_apps SET release_date = COALESCE(release_date, ?2) WHERE id = ?1",
            params![app_id, pushed_timestamp],
        )?;
    }
    Ok(())
}
/// Merge GitHub release details into a catalog app. Each field only
/// overwrites when the incoming value is non-NULL; the enrichment
/// timestamp is always refreshed.
pub fn update_catalog_app_release_info(
    &self,
    app_id: i64,
    version: Option<&str>,
    date: Option<&str>,
    downloads: Option<i64>,
    github_download_url: Option<&str>,
    github_release_assets: Option<&str>,
) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE catalog_apps SET
             latest_version = COALESCE(?2, latest_version),
             release_date = COALESCE(?3, release_date),
             github_downloads = COALESCE(?4, github_downloads),
             github_download_url = COALESCE(?5, github_download_url),
             github_release_assets = COALESCE(?6, github_release_assets),
             github_enriched_at = datetime('now')
             WHERE id = ?1",
            params![app_id, version, date, downloads, github_download_url, github_release_assets],
        )
        .map(|_| ())
}
/// Up to `limit` catalog apps with a known GitHub repo that have not yet
/// been enriched with GitHub metadata, in id order.
pub fn get_unenriched_catalog_apps(&self, limit: i32) -> SqlResult<Vec<CatalogApp>> {
    let sql = format!(
        "SELECT {} FROM catalog_apps
         WHERE github_owner IS NOT NULL AND github_enriched_at IS NULL
         {}
         ORDER BY id
         LIMIT ?1",
        Self::CATALOG_APP_COLUMNS,
        Self::CATALOG_DEDUP_FILTER,
    );
    let mut stmt = self.conn.prepare(&sql)?;
    stmt.query_map(params![limit], Self::catalog_app_from_row)?
        .collect()
}
/// Progress of GitHub enrichment as (enriched, total-with-github) counts
/// over the deduplicated catalog.
pub fn catalog_enrichment_progress(&self) -> SqlResult<(i64, i64)> {
    // Both counts share the same base query; only the extra predicate differs.
    let count_where = |extra: &str| -> SqlResult<i64> {
        let sql = format!(
            "SELECT COUNT(*) FROM catalog_apps WHERE github_owner IS NOT NULL{} {}",
            extra,
            Self::CATALOG_DEDUP_FILTER,
        );
        self.conn.query_row(&sql, [], |row| row.get(0))
    };
    let enriched = count_where(" AND github_enriched_at IS NOT NULL")?;
    let total_with_github = count_where("")?;
    Ok((enriched, total_with_github))
}
/// Cache the fetched GitHub README text for a catalog app.
pub fn update_catalog_app_readme(
    &self,
    app_id: i64,
    readme: &str,
) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE catalog_apps SET github_readme = ?2 WHERE id = ?1",
            params![app_id, readme],
        )
        .map(|_| ())
}
/// Cache the serialized release history (JSON) for a catalog app.
pub fn update_catalog_app_release_history(
    &self,
    app_id: i64,
    history_json: &str,
) -> SqlResult<()> {
    self.conn
        .execute(
            "UPDATE catalog_apps SET release_history = ?2 WHERE id = ?1",
            params![app_id, history_json],
        )
        .map(|_| ())
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Basic insert and read-back of a scanned AppImage.
    #[test]
    fn test_create_and_query() {
        let db = Database::open_in_memory().unwrap();
        assert_eq!(db.appimage_count().unwrap(), 0);
        db.upsert_appimage(
            "/home/user/Apps/test.AppImage",
            "test.AppImage",
            Some(2),
            1024000,
            true,
            None,
        ).unwrap();
        assert_eq!(db.appimage_count().unwrap(), 1);
        let all = db.get_all_appimages().unwrap();
        assert_eq!(all.len(), 1);
        assert_eq!(all[0].filename, "test.AppImage");
        assert_eq!(all[0].size_bytes, 1024000);
        assert!(all[0].is_executable);
    }

    // Upserting the same path twice updates in place instead of duplicating.
    #[test]
    fn test_upsert_updates_existing() {
        let db = Database::open_in_memory().unwrap();
        db.upsert_appimage("/path/test.AppImage", "test.AppImage", Some(2), 1000, true, None).unwrap();
        db.upsert_appimage("/path/test.AppImage", "test.AppImage", Some(2), 2000, true, None).unwrap();
        assert_eq!(db.appimage_count().unwrap(), 1);
        let record = db.get_appimage_by_path("/path/test.AppImage").unwrap().unwrap();
        assert_eq!(record.size_bytes, 2000);
    }

    // update_metadata fills in the extracted desktop-entry fields.
    #[test]
    fn test_metadata_update() {
        let db = Database::open_in_memory().unwrap();
        let id = db.upsert_appimage("/path/test.AppImage", "test.AppImage", Some(2), 1000, true, None).unwrap();
        db.update_metadata(
            id,
            Some("Test App"),
            Some("1.0.0"),
            Some("A test application"),
            Some("Test Dev"),
            Some("Utility;Development"),
            Some("x86_64"),
            Some("/path/to/icon.png"),
            Some("[Desktop Entry]\nName=Test App"),
        ).unwrap();
        let record = db.get_appimage_by_id(id).unwrap().unwrap();
        assert_eq!(record.app_name.as_deref(), Some("Test App"));
        assert_eq!(record.app_version.as_deref(), Some("1.0.0"));
        assert_eq!(record.architecture.as_deref(), Some("x86_64"));
    }

    // Integrating sets the flag and timestamp; un-integrating clears the flag.
    #[test]
    fn test_integration_toggle() {
        let db = Database::open_in_memory().unwrap();
        let id = db.upsert_appimage("/path/test.AppImage", "test.AppImage", Some(2), 1000, true, None).unwrap();
        assert!(!db.get_appimage_by_id(id).unwrap().unwrap().integrated);
        db.set_integrated(id, true, Some("/path/to/desktop")).unwrap();
        let record = db.get_appimage_by_id(id).unwrap().unwrap();
        assert!(record.integrated);
        assert!(record.integrated_at.is_some());
        db.set_integrated(id, false, None).unwrap();
        assert!(!db.get_appimage_by_id(id).unwrap().unwrap().integrated);
    }

    // Orphaned desktop entries can be listed and marked cleaned.
    #[test]
    fn test_orphaned_entries() {
        let db = Database::open_in_memory().unwrap();
        db.add_orphaned_entry("/path/to/desktop", Some("/path/app.AppImage"), Some("App")).unwrap();
        let orphans = db.get_orphaned_entries().unwrap();
        assert_eq!(orphans.len(), 1);
        assert_eq!(orphans[0].app_name.as_deref(), Some("App"));
        db.mark_orphan_cleaned(orphans[0].id).unwrap();
        assert_eq!(db.get_orphaned_entries().unwrap().len(), 0);
    }

    // log_scan accepts a well-formed record without error.
    #[test]
    fn test_scan_log() {
        let db = Database::open_in_memory().unwrap();
        db.log_scan("manual", &["~/Applications".into()], 5, 3, 0, 250).unwrap();
    }

    // FUSE/Wayland/update status columns round-trip, and clearing an
    // available update removes latest_version.
    #[test]
    fn test_phase2_status_updates() {
        let db = Database::open_in_memory().unwrap();
        let id = db.upsert_appimage("/path/app.AppImage", "app.AppImage", Some(2), 1000, true, None).unwrap();
        db.update_fuse_status(id, "fully_functional").unwrap();
        db.update_wayland_status(id, "native").unwrap();
        db.update_update_info(id, Some("gh-releases-zsync|user|repo|latest|*.zsync"), Some("github")).unwrap();
        db.set_update_available(id, Some("2.0.0"), Some("https://example.com/app-2.0.AppImage")).unwrap();
        let record = db.get_appimage_by_id(id).unwrap().unwrap();
        assert_eq!(record.fuse_status.as_deref(), Some("fully_functional"));
        assert_eq!(record.wayland_status.as_deref(), Some("native"));
        assert_eq!(record.update_type.as_deref(), Some("github"));
        assert_eq!(record.latest_version.as_deref(), Some("2.0.0"));
        assert!(record.update_checked.is_some());
        let with_updates = db.get_appimages_with_updates().unwrap();
        assert_eq!(with_updates.len(), 1);
        db.clear_update_available(id).unwrap();
        let record = db.get_appimage_by_id(id).unwrap().unwrap();
        assert!(record.latest_version.is_none());
    }

    // Launch events are counted, timestamped, and keep their source labels.
    #[test]
    fn test_launch_tracking() {
        let db = Database::open_in_memory().unwrap();
        let id = db.upsert_appimage("/path/app.AppImage", "app.AppImage", Some(2), 1000, true, None).unwrap();
        assert_eq!(db.get_launch_count(id).unwrap(), 0);
        assert!(db.get_last_launched(id).unwrap().is_none());
        db.record_launch(id, "desktop_entry").unwrap();
        db.record_launch(id, "cli").unwrap();
        assert_eq!(db.get_launch_count(id).unwrap(), 2);
        assert!(db.get_last_launched(id).unwrap().is_some());
        let events = db.get_launch_events(id).unwrap();
        assert_eq!(events.len(), 2);
        let sources: Vec<&str> = events.iter().map(|e| e.source.as_str()).collect();
        assert!(sources.contains(&"desktop_entry"));
        assert!(sources.contains(&"cli"));
    }

    // A recorded update shows up in the per-app update history.
    #[test]
    fn test_update_history() {
        let db = Database::open_in_memory().unwrap();
        let id = db.upsert_appimage("/path/app.AppImage", "app.AppImage", Some(2), 1000, true, None).unwrap();
        db.record_update(id, Some("1.0"), Some("2.0"), Some("full_download"), Some(50_000_000), true).unwrap();
        let history = db.get_update_history(id).unwrap();
        assert_eq!(history.len(), 1);
        assert_eq!(history[0].from_version.as_deref(), Some("1.0"));
        assert_eq!(history[0].to_version.as_deref(), Some("2.0"));
        assert!(history[0].success);
    }

    // --- Migration tests ---
    #[test]
    fn test_fresh_database_creates_at_latest_version() {
        let db = Database::open_in_memory().unwrap();
        // Verify schema_version is at the latest (18).
        let version: i32 = db.conn.query_row(
            "SELECT version FROM schema_version LIMIT 1",
            [],
            |row| row.get(0),
        ).unwrap();
        assert_eq!(version, 18);
        // All tables that should exist after running the full migration chain.
        let expected_tables = [
            "appimages",
            "orphaned_entries",
            "scan_log",
            "launch_events",
            "update_history",
            "duplicate_groups",
            "duplicate_members",
            "bundled_libraries",
            "cve_matches",
            "app_data_paths",
            "config_backups",
            "backup_entries",
            "exported_reports",
            "cve_notifications",
            "catalog_sources",
            "catalog_apps",
            "sandbox_profiles",
            "sandbox_profile_history",
            "runtime_updates",
            "system_modifications",
        ];
        for table in &expected_tables {
            let count: i32 = db.conn.query_row(
                "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?1",
                params![table],
                |row| row.get(0),
            ).unwrap();
            assert_eq!(count, 1, "Expected table '{}' to exist", table);
        }
    }

    #[test]
    fn test_appimage_columns_include_analysis_status() {
        let db = Database::open_in_memory().unwrap();
        // Insert a record via upsert_appimage
        let id = db.upsert_appimage(
            "/tmp/analysis_test.AppImage",
            "analysis_test.AppImage",
            Some(2),
            5000,
            true,
            None,
        ).unwrap();
        // Retrieve and verify analysis_status exists and defaults to 'complete'
        let record = db.get_appimage_by_id(id).unwrap().unwrap();
        assert_eq!(
            record.analysis_status.as_deref(),
            Some("complete"),
            "analysis_status should default to 'complete'"
        );
    }

    #[test]
    fn test_update_analysis_status() {
        let db = Database::open_in_memory().unwrap();
        let id = db.upsert_appimage(
            "/tmp/status_test.AppImage",
            "status_test.AppImage",
            Some(2),
            3000,
            true,
            None,
        ).unwrap();
        // Update to "analyzing" and verify
        db.update_analysis_status(id, "analyzing").unwrap();
        let record = db.get_appimage_by_id(id).unwrap().unwrap();
        assert_eq!(
            record.analysis_status.as_deref(),
            Some("analyzing"),
            "analysis_status should be 'analyzing' after update"
        );
        // Update back to "complete" and verify
        db.update_analysis_status(id, "complete").unwrap();
        let record = db.get_appimage_by_id(id).unwrap().unwrap();
        assert_eq!(
            record.analysis_status.as_deref(),
            Some("complete"),
            "analysis_status should be 'complete' after second update"
        );
    }

    // Every basic column round-trips through upsert + get_appimage_by_path.
    #[test]
    fn test_upsert_and_retrieve() {
        let db = Database::open_in_memory().unwrap();
        let path = "/home/user/Apps/MyApp-3.2.1-x86_64.AppImage";
        let filename = "MyApp-3.2.1-x86_64.AppImage";
        let appimage_type = Some(2);
        let size_bytes: i64 = 48_000_000;
        let is_executable = true;
        let file_modified = Some("2026-01-15 10:30:00");
        let id = db.upsert_appimage(
            path,
            filename,
            appimage_type,
            size_bytes,
            is_executable,
            file_modified,
        ).unwrap();
        // Retrieve by path and verify all basic fields match
        let record = db.get_appimage_by_path(path).unwrap()
            .expect("record should exist after upsert");
        assert_eq!(record.id, id);
        assert_eq!(record.path, path);
        assert_eq!(record.filename, filename);
        assert_eq!(record.appimage_type, appimage_type);
        assert_eq!(record.size_bytes, size_bytes);
        assert_eq!(record.is_executable, is_executable);
        assert_eq!(record.file_modified.as_deref(), file_modified);
    }

    #[test]
    fn test_remove_missing_cleans_nonexistent() {
        let db = Database::open_in_memory().unwrap();
        // Insert a record with a path that definitely does not exist on disk
        let id = db.upsert_appimage(
            "/absolutely/nonexistent/path/fake.AppImage",
            "fake.AppImage",
            Some(2),
            1234,
            true,
            None,
        ).unwrap();
        assert!(id > 0);
        // Confirm it was inserted
        assert_eq!(db.appimage_count().unwrap(), 1);
        // remove_missing_appimages should remove it since the path does not exist
        let removed = db.remove_missing_appimages().unwrap();
        assert_eq!(removed.len(), 1);
        assert_eq!(removed[0].path, "/absolutely/nonexistent/path/fake.AppImage");
        // Verify the database is now empty
        assert_eq!(db.appimage_count().unwrap(), 0);
        let record = db.get_appimage_by_id(id).unwrap();
        assert!(record.is_none(), "record should be gone after remove_missing_appimages");
    }
}