Files
pixstrip/pixstrip-core/src/executor.rs

847 lines
33 KiB
Rust

use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Instant;
use rayon::prelude::*;
use crate::encoder::{EncoderOptions, OutputEncoder};
use crate::error::{PixstripError, Result};
use crate::loader::ImageLoader;
use crate::operations::adjustments::apply_adjustments;
use crate::operations::resize::resize_image_with_algorithm;
use crate::operations::watermark::apply_watermark;
use crate::operations::{Flip, Rotation};
use crate::pipeline::ProcessingJob;
use crate::types::ImageFormat;
/// Snapshot of batch progress handed to the `on_progress` callback.
///
/// In the parallel path an update is sent after a file finishes; in the
/// sequential path it is sent just before the file is processed, so the
/// counters there do not yet include `current_file`.
#[derive(Debug, Clone)]
pub struct ProgressUpdate {
    /// 1-based position within the batch.
    pub current: usize,
    /// Total number of files in the batch.
    pub total: usize,
    /// File name (not full path) of the file this update refers to.
    pub current_file: String,
    /// Files that have completed successfully so far.
    pub succeeded_so_far: usize,
    /// Files that have failed so far.
    pub failed_so_far: usize,
}
/// Aggregate outcome of a whole batch run.
#[derive(Debug, Clone)]
pub struct BatchResult {
    /// Number of source files in the job.
    pub total: usize,
    /// Files processed successfully (includes files skipped by overwrite=Skip).
    pub succeeded: usize,
    /// Files that returned an error.
    pub failed: usize,
    /// True if the cancel flag was observed before the batch finished.
    pub cancelled: bool,
    /// Sum of source file sizes for processed files.
    pub total_input_bytes: u64,
    /// Sum of written output sizes (skipped files contribute 0).
    pub total_output_bytes: u64,
    /// (file name, error message) pairs for each failed file.
    pub errors: Vec<(String, String)>,
    /// Wall-clock duration of the batch in milliseconds.
    pub elapsed_ms: u64,
    /// Actual output file paths written by the executor (only successfully written files).
    pub output_files: Vec<String>,
}
/// Runs a `ProcessingJob` over a batch of images, either sequentially or on
/// a rayon thread pool, with cooperative cancel/pause support.
pub struct PipelineExecutor {
    // Shared flag; when set externally, workers stop picking up new files.
    cancel_flag: Arc<AtomicBool>,
    // Shared flag; while set, workers sleep-poll instead of processing.
    pause_flag: Arc<AtomicBool>,
    // Worker thread count; a value <= 1 selects the sequential path.
    thread_count: usize,
    // When true, any per-file error raises `pause_flag` to halt the batch.
    pause_on_error: bool,
}
impl PipelineExecutor {
/// Create an executor with fresh (unset) cancel/pause flags and one
/// worker thread per available CPU.
pub fn new() -> Self {
    let cancel = Arc::new(AtomicBool::new(false));
    let pause = Arc::new(AtomicBool::new(false));
    Self {
        cancel_flag: cancel,
        pause_flag: pause,
        thread_count: num_cpus(),
        pause_on_error: false,
    }
}
/// Create an executor that observes an externally owned cancel flag.
/// The pause flag is freshly created and initially unset.
pub fn with_cancel(cancel_flag: Arc<AtomicBool>) -> Self {
    Self {
        pause_flag: Arc::new(AtomicBool::new(false)),
        thread_count: num_cpus(),
        pause_on_error: false,
        cancel_flag,
    }
}
/// Create an executor that observes externally owned cancel and pause
/// flags, so a UI thread can control a running batch.
pub fn with_cancel_and_pause(
    cancel_flag: Arc<AtomicBool>,
    pause_flag: Arc<AtomicBool>,
) -> Self {
    Self {
        thread_count: num_cpus(),
        pause_on_error: false,
        cancel_flag,
        pause_flag,
    }
}
/// Set the worker thread count; `0` means "use all available CPUs".
pub fn set_thread_count(&mut self, count: usize) {
    self.thread_count = match count {
        0 => num_cpus(),
        n => n,
    };
}
/// When enabled, any per-file failure sets the shared pause flag, so the
/// batch stops for intervention instead of continuing past errors.
pub fn set_pause_on_error(&mut self, pause: bool) {
    self.pause_on_error = pause;
}
/// Run the whole batch described by `job`, invoking `on_progress` after
/// each file completes, and return aggregate statistics.
///
/// Delegates to `execute_sequential` when `thread_count <= 1`. Otherwise
/// the sources are fanned out on a dedicated rayon pool while the calling
/// thread drains progress updates from an mpsc channel — this guarantees
/// the `FnMut` callback only ever runs on the caller's thread.
pub fn execute<F>(&self, job: &ProcessingJob, mut on_progress: F) -> Result<BatchResult>
where
    F: FnMut(ProgressUpdate) + Send,
{
    let start = Instant::now();
    let total = job.sources.len();
    // Empty batch: report an all-zero result without building a pool.
    if total == 0 {
        return Ok(BatchResult {
            total: 0,
            succeeded: 0,
            failed: 0,
            cancelled: false,
            total_input_bytes: 0,
            total_output_bytes: 0,
            errors: Vec::new(),
            elapsed_ms: 0,
            output_files: Vec::new(),
        });
    }
    // Use single-threaded path when thread_count is 1
    if self.thread_count <= 1 {
        return self.execute_sequential(job, on_progress);
    }
    // Build a scoped rayon thread pool with the configured count
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(self.thread_count)
        .build()
        .map_err(|e| PixstripError::Processing {
            operation: "thread_pool".into(),
            reason: e.to_string(),
        })?;
    // Shared counters updated by worker closures; read once at the end.
    let completed = AtomicUsize::new(0);
    let succeeded_count = AtomicUsize::new(0);
    let failed_count = AtomicUsize::new(0);
    let input_bytes = AtomicU64::new(0);
    let output_bytes = AtomicU64::new(0);
    let cancelled = AtomicBool::new(false);
    let errors: Mutex<Vec<(String, String)>> = Mutex::new(Vec::new());
    let written_files: Mutex<Vec<String>> = Mutex::new(Vec::new());
    // Channel for progress updates from worker threads to the callback
    let (tx, rx) = std::sync::mpsc::channel::<ProgressUpdate>();
    let cancel_flag = &self.cancel_flag;
    let pause_flag = &self.pause_flag;
    let pause_on_error = self.pause_on_error;
    // Run the parallel work in a scoped thread so we can drain progress concurrently
    std::thread::scope(|scope| {
        // Spawn the rayon pool work
        let tx_clone = tx.clone();
        // References captured by the `move` closure; thread::scope lets
        // the worker borrow the locals above without `'static` + Arc.
        let completed_ref = &completed;
        let succeeded_ref = &succeeded_count;
        let failed_ref = &failed_count;
        let input_bytes_ref = &input_bytes;
        let output_bytes_ref = &output_bytes;
        let cancelled_ref = &cancelled;
        let errors_ref = &errors;
        let written_ref = &written_files;
        let worker = scope.spawn(move || {
            pool.install(|| {
                job.sources.par_iter().enumerate().for_each(|(idx, source)| {
                    // Check cancel
                    if cancel_flag.load(Ordering::Relaxed) {
                        cancelled_ref.store(true, Ordering::Relaxed);
                        return;
                    }
                    // Wait while paused (cancel is still honored during a pause)
                    while pause_flag.load(Ordering::Relaxed) {
                        std::thread::sleep(std::time::Duration::from_millis(100));
                        if cancel_flag.load(Ordering::Relaxed) {
                            cancelled_ref.store(true, Ordering::Relaxed);
                            return;
                        }
                    }
                    if cancelled_ref.load(Ordering::Relaxed) {
                        return;
                    }
                    let file_name = source
                        .path
                        .file_name()
                        .and_then(|n| n.to_str())
                        .unwrap_or("unknown")
                        .to_string();
                    // Each task builds its own loader/encoder rather than
                    // sharing one across threads.
                    let loader = ImageLoader::new();
                    let encoder = OutputEncoder::with_options(EncoderOptions {
                        progressive_jpeg: job.progressive_jpeg,
                        avif_speed: job.avif_speed,
                        output_dpi: job.output_dpi,
                    });
                    match Self::process_single_static(job, source, &loader, &encoder, idx) {
                        Ok((in_size, out_size, out_path)) => {
                            succeeded_ref.fetch_add(1, Ordering::Relaxed);
                            input_bytes_ref.fetch_add(in_size, Ordering::Relaxed);
                            output_bytes_ref.fetch_add(out_size, Ordering::Relaxed);
                            // An empty path marks a skipped file (overwrite=Skip);
                            // skipped files are not recorded as outputs.
                            if !out_path.as_os_str().is_empty() {
                                if let Ok(mut wf) = written_ref.lock() {
                                    wf.push(out_path.display().to_string());
                                }
                            }
                        }
                        Err(e) => {
                            failed_ref.fetch_add(1, Ordering::Relaxed);
                            if let Ok(mut errs) = errors_ref.lock() {
                                errs.push((file_name.clone(), e.to_string()));
                            }
                            if pause_on_error {
                                pause_flag.store(true, Ordering::Relaxed);
                            }
                        }
                    }
                    // Send progress after processing so counts are consistent
                    let current = completed_ref.fetch_add(1, Ordering::Relaxed) + 1;
                    let _ = tx_clone.send(ProgressUpdate {
                        current,
                        total,
                        current_file: file_name,
                        succeeded_so_far: succeeded_ref.load(Ordering::Relaxed),
                        failed_so_far: failed_ref.load(Ordering::Relaxed),
                    });
                });
            });
            // Drop sender so the receiver loop ends
            drop(tx_clone);
        });
        // Drop the original sender so only the worker's clone keeps the channel open
        drop(tx);
        // Drain progress updates on this thread (the calling thread)
        for update in rx {
            on_progress(update);
        }
        // Wait for worker to finish (it already has by the time rx is drained)
        let _ = worker.join();
    });
    Ok(BatchResult {
        total,
        succeeded: succeeded_count.load(Ordering::Relaxed),
        failed: failed_count.load(Ordering::Relaxed),
        cancelled: cancelled.load(Ordering::Relaxed),
        total_input_bytes: input_bytes.load(Ordering::Relaxed),
        total_output_bytes: output_bytes.load(Ordering::Relaxed),
        errors: errors.into_inner().unwrap_or_default(),
        elapsed_ms: start.elapsed().as_millis() as u64,
        output_files: written_files.into_inner().unwrap_or_default(),
    })
}
/// Single-threaded execution path, used when `thread_count <= 1`.
///
/// Note: unlike the parallel path, the progress callback fires just
/// before each file is processed, so `succeeded_so_far`/`failed_so_far`
/// do not yet include the file named in the update.
fn execute_sequential<F>(&self, job: &ProcessingJob, mut on_progress: F) -> Result<BatchResult>
where
    F: FnMut(ProgressUpdate) + Send,
{
    let start = Instant::now();
    let total = job.sources.len();
    // One loader/encoder pair is reused for the whole batch.
    let loader = ImageLoader::new();
    let encoder = OutputEncoder::with_options(EncoderOptions {
        progressive_jpeg: job.progressive_jpeg,
        avif_speed: job.avif_speed,
        output_dpi: job.output_dpi,
    });
    let mut result = BatchResult {
        total,
        succeeded: 0,
        failed: 0,
        cancelled: false,
        total_input_bytes: 0,
        total_output_bytes: 0,
        errors: Vec::new(),
        elapsed_ms: 0,
        output_files: Vec::new(),
    };
    for (i, source) in job.sources.iter().enumerate() {
        // Stop before starting the next file if cancellation was requested.
        if self.cancel_flag.load(Ordering::Relaxed) {
            result.cancelled = true;
            break;
        }
        // Sleep-poll while paused; cancel is still honored during a pause.
        while self.pause_flag.load(Ordering::Relaxed) {
            std::thread::sleep(std::time::Duration::from_millis(100));
            if self.cancel_flag.load(Ordering::Relaxed) {
                result.cancelled = true;
                break;
            }
        }
        if result.cancelled {
            break;
        }
        let file_name = source
            .path
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or("unknown")
            .to_string();
        // Progress is reported before processing this file.
        on_progress(ProgressUpdate {
            current: i + 1,
            total,
            current_file: file_name.clone(),
            succeeded_so_far: result.succeeded,
            failed_so_far: result.failed,
        });
        match Self::process_single_static(job, source, &loader, &encoder, i) {
            Ok((input_size, output_size, out_path)) => {
                result.succeeded += 1;
                result.total_input_bytes += input_size;
                result.total_output_bytes += output_size;
                // An empty path marks a skipped file (overwrite=Skip).
                if !out_path.as_os_str().is_empty() {
                    result.output_files.push(out_path.display().to_string());
                }
            }
            Err(e) => {
                result.failed += 1;
                result.errors.push((file_name, e.to_string()));
                if self.pause_on_error {
                    self.pause_flag.store(true, Ordering::Relaxed);
                }
            }
        }
    }
    result.elapsed_ms = start.elapsed().as_millis() as u64;
    Ok(result)
}
/// Process one source image end-to-end and return
/// `(input_bytes, output_bytes, written_path)`.
///
/// A file skipped by `OverwriteAction::Skip` is reported as success with
/// `output_bytes == 0` and an empty `PathBuf`; callers check
/// `out_path.as_os_str().is_empty()` before recording it as an output.
///
/// Associated (no `&self`) so it can be called from rayon worker closures
/// without borrowing the executor.
fn process_single_static(
    job: &ProcessingJob,
    source: &crate::types::ImageSource,
    loader: &ImageLoader,
    encoder: &OutputEncoder,
    index: usize,
) -> std::result::Result<(u64, u64, std::path::PathBuf), PixstripError> {
    // Unreadable source metadata degrades to a size of 0 rather than failing.
    let input_size = std::fs::metadata(&source.path)
        .map(|m| m.len())
        .unwrap_or(0);
    // Metadata stripping via little_exif only works for JPEG/TIFF.
    // For other formats, force the pixel path so re-encoding strips metadata.
    let metadata_needs_reencode = job.metadata.as_ref().is_some_and(|m| {
        !matches!(m, crate::operations::MetadataConfig::KeepAll)
            && !matches!(
                source.original_format,
                Some(ImageFormat::Jpeg) | Some(ImageFormat::Tiff)
            )
    });
    // Fast path: if no pixel processing needed (rename-only or rename+metadata),
    // just copy the file instead of decoding/re-encoding.
    if !job.needs_pixel_processing() && !metadata_needs_reencode {
        let output_path = if let Some(ref rename) = job.rename {
            let stem = source.path.file_stem().and_then(|s| s.to_str()).unwrap_or("output");
            let ext = source.path.extension().and_then(|e| e.to_str()).unwrap_or("jpg");
            if let Some(ref template) = rename.template {
                // Read dimensions without full decode for {width}/{height} templates
                let dims = image::ImageReader::open(&source.path)
                    .ok()
                    .and_then(|r| r.with_guessed_format().ok())
                    .and_then(|r| r.into_dimensions().ok());
                // Apply regex find/replace on stem (matching the pixel path)
                let working_stem = crate::operations::rename::apply_regex_replace(
                    stem, &rename.regex_find, &rename.regex_replace,
                );
                let new_name = crate::operations::rename::apply_template_full(
                    template, &working_stem, ext,
                    rename.counter_start.saturating_add(index as u32),
                    dims, Some(ext), Some(&source.path), None,
                );
                // Case conversion applies to the name only, never the extension.
                let new_name = if rename.case_mode > 0 {
                    if let Some(dot_pos) = new_name.rfind('.') {
                        let (name_part, ext_part) = new_name.split_at(dot_pos);
                        format!("{}{}", crate::operations::rename::apply_case_conversion(name_part, rename.case_mode), ext_part)
                    } else {
                        crate::operations::rename::apply_case_conversion(&new_name, rename.case_mode)
                    }
                } else {
                    new_name
                };
                // sanitize_filename guards against path traversal via templates.
                job.output_dir.join(sanitize_filename(&new_name))
            } else {
                let new_name = rename.apply_simple(stem, ext, (index as u32).saturating_add(1));
                job.output_dir.join(sanitize_filename(&new_name))
            }
        } else {
            job.output_path_for(source, None)
        };
        // Prevent overwriting the source file
        if paths_are_same(&source.path, &output_path) {
            return Err(PixstripError::Processing {
                operation: "output".into(),
                reason: format!("output path is the same as source: {}", source.path.display()),
            });
        }
        let output_path = match job.overwrite_behavior {
            crate::operations::OverwriteAction::Skip if output_path.exists() => {
                // Skipped: report success with no bytes written and no path.
                return Ok((input_size, 0, std::path::PathBuf::new()));
            }
            crate::operations::OverwriteAction::AutoRename if output_path.exists() => {
                find_unique_path(&output_path)
            }
            _ => output_path,
        };
        if let Some(parent) = output_path.parent() {
            std::fs::create_dir_all(parent).map_err(|e| {
                // Remove the 0-byte reservation left by find_unique_path, if any.
                cleanup_placeholder(&output_path);
                PixstripError::Io(e)
            })?;
        }
        std::fs::copy(&source.path, &output_path).map_err(|e| {
            cleanup_placeholder(&output_path);
            PixstripError::Io(e)
        })?;
        // Metadata handling on the copy
        if let Some(ref meta_config) = job.metadata {
            match meta_config {
                crate::operations::MetadataConfig::KeepAll => {
                    // Already a copy - metadata preserved
                }
                crate::operations::MetadataConfig::StripAll => {
                    if !strip_all_metadata(&output_path) {
                        eprintln!("Warning: failed to strip metadata from {}", output_path.display());
                    }
                }
                _ => {
                    strip_selective_metadata(&output_path, meta_config);
                }
            }
        }
        let output_size = std::fs::metadata(&output_path).map(|m| m.len()).unwrap_or(0);
        return Ok((input_size, output_size, output_path));
    }
    // Pixel path: decode, transform, re-encode.
    let mut img = loader.load_pixels(&source.path)?;
    // Rotation
    if let Some(ref rotation) = job.rotation {
        img = match rotation {
            Rotation::None => img,
            Rotation::Cw90 => img.rotate90(),
            Rotation::Cw180 => img.rotate180(),
            Rotation::Cw270 => img.rotate270(),
            Rotation::AutoOrient => auto_orient_from_exif(img, &source.path),
        };
    }
    // Flip
    if let Some(ref flip) = job.flip {
        img = match flip {
            Flip::None => img,
            Flip::Horizontal => img.fliph(),
            Flip::Vertical => img.flipv(),
        };
    }
    // Resize
    if let Some(ref config) = job.resize {
        img = resize_image_with_algorithm(&img, config, job.resize_algorithm)?;
    }
    // Adjustments (brightness, contrast, saturation, effects, crop, padding)
    if let Some(ref adj) = job.adjustments {
        if !adj.is_noop() {
            img = apply_adjustments(img, adj)?;
        }
    }
    // Determine output format: explicit conversion wins; otherwise keep the
    // source format (defaulting to JPEG when the source format is unknown).
    let output_format = if let Some(ref convert) = job.convert {
        let input_fmt = source.original_format.unwrap_or(ImageFormat::Jpeg);
        convert.output_format(input_fmt)
    } else {
        source.original_format.unwrap_or(ImageFormat::Jpeg)
    };
    // Determine quality (None means the encoder's default for the format).
    let quality = job.compress.as_ref().map(|c| match c {
        crate::operations::CompressConfig::Preset(preset) => {
            encoder.quality_for_format(output_format, preset)
        }
        crate::operations::CompressConfig::Custom {
            jpeg_quality,
            png_level,
            webp_quality,
            avif_quality,
        } => match output_format {
            ImageFormat::Jpeg => jpeg_quality.unwrap_or(85),
            ImageFormat::Png => png_level.unwrap_or(6),
            ImageFormat::WebP => webp_quality.map(|q| q.clamp(0.0, 100.0) as u8).unwrap_or(80),
            ImageFormat::Avif => avif_quality.map(|q| q.clamp(0.0, 100.0) as u8).unwrap_or(50),
            _ => 85,
        },
    });
    // Determine output path (with rename if configured)
    let output_path = if let Some(ref rename) = job.rename {
        let stem = source
            .path
            .file_stem()
            .and_then(|s| s.to_str())
            .unwrap_or("output");
        // Extension follows the (possibly converted) output format.
        let ext = output_format.extension();
        if let Some(ref template) = rename.template {
            let dims = Some((img.width(), img.height()));
            let original_ext = source.path.extension()
                .and_then(|e| e.to_str());
            // Apply regex on the stem before template expansion
            let working_stem = crate::operations::rename::apply_regex_replace(
                stem, &rename.regex_find, &rename.regex_replace,
            );
            let new_name = crate::operations::rename::apply_template_full(
                template,
                &working_stem,
                ext,
                rename.counter_start.saturating_add(index as u32),
                dims,
                original_ext,
                Some(&source.path),
                None,
            );
            // Apply case conversion to the final name (without extension)
            let new_name = if rename.case_mode > 0 {
                if let Some(dot_pos) = new_name.rfind('.') {
                    let (name_part, ext_part) = new_name.split_at(dot_pos);
                    format!("{}{}", crate::operations::rename::apply_case_conversion(name_part, rename.case_mode), ext_part)
                } else {
                    crate::operations::rename::apply_case_conversion(&new_name, rename.case_mode)
                }
            } else {
                new_name
            };
            job.output_dir.join(sanitize_filename(&new_name))
        } else {
            let new_name = rename.apply_simple(stem, ext, (index as u32).saturating_add(1));
            job.output_dir.join(sanitize_filename(&new_name))
        }
    } else {
        job.output_path_for(source, Some(output_format))
    };
    // Prevent overwriting the source file
    if paths_are_same(&source.path, &output_path) {
        return Err(PixstripError::Processing {
            operation: "output".into(),
            reason: format!("output path is the same as source: {}", source.path.display()),
        });
    }
    // Handle overwrite behavior
    let output_path = match job.overwrite_behavior {
        crate::operations::OverwriteAction::Skip => {
            if output_path.exists() {
                // Return 0 bytes written - file was skipped
                return Ok((input_size, 0, std::path::PathBuf::new()));
            }
            output_path
        }
        crate::operations::OverwriteAction::AutoRename => {
            if output_path.exists() {
                find_unique_path(&output_path)
            } else {
                output_path
            }
        }
        crate::operations::OverwriteAction::Overwrite => output_path,
    };
    // Ensure output directory exists
    if let Some(parent) = output_path.parent() {
        std::fs::create_dir_all(parent).map_err(|e| {
            // Remove the 0-byte reservation left by find_unique_path, if any.
            cleanup_placeholder(&output_path);
            PixstripError::Io(e)
        })?;
    }
    // Watermark (after compress settings determined, before encode)
    if let Some(ref config) = job.watermark {
        img = apply_watermark(img, config).map_err(|e| {
            cleanup_placeholder(&output_path);
            e
        })?;
    }
    // Encode and save
    encoder.encode_to_file(&img, &output_path, output_format, quality).map_err(|e| {
        cleanup_placeholder(&output_path);
        e
    })?;
    // Metadata handling: re-encoding strips all EXIF by default.
    // KeepAll: copy everything back from source.
    // Privacy/Custom: copy metadata back, then selectively strip certain tags.
    // StripAll: do nothing (already stripped by re-encoding).
    // Note: little_exif only supports JPEG and TIFF metadata manipulation.
    if let Some(ref meta_config) = job.metadata {
        let format_supports_exif = matches!(
            output_format,
            ImageFormat::Jpeg | ImageFormat::Tiff
        );
        match meta_config {
            crate::operations::MetadataConfig::KeepAll => {
                if format_supports_exif {
                    if !copy_metadata_from_source(&source.path, &output_path) {
                        eprintln!("Warning: failed to copy metadata to {}", output_path.display());
                    }
                }
                // For non-JPEG/TIFF formats, metadata is lost during re-encoding
                // and cannot be restored. This is a known limitation.
            }
            crate::operations::MetadataConfig::StripAll => {
                // Already stripped by re-encoding - nothing to do
            }
            _ => {
                // Privacy or Custom: copy all metadata back, then strip unwanted tags
                if format_supports_exif {
                    if !copy_metadata_from_source(&source.path, &output_path) {
                        eprintln!("Warning: failed to copy metadata to {}", output_path.display());
                    }
                    strip_selective_metadata(&output_path, meta_config);
                }
            }
        }
    }
    let output_size = std::fs::metadata(&output_path)
        .map(|m| m.len())
        .unwrap_or(0);
    Ok((input_size, output_size, output_path))
}
}
impl Default for PipelineExecutor {
fn default() -> Self {
Self::new()
}
}
/// Number of CPUs available to this process, falling back to 1 when the
/// platform cannot report parallelism.
fn num_cpus() -> usize {
    match std::thread::available_parallelism() {
        Ok(n) => n.get(),
        Err(_) => 1,
    }
}
/// Apply the rotation/flip encoded in the file's EXIF `Orientation` tag.
///
/// EXIF orientation values:
/// 1 = Normal, 2 = Flipped horizontal, 3 = Rotated 180,
/// 4 = Flipped vertical, 5 = Transposed (flip H + rotate 270),
/// 6 = Rotated 90 CW, 7 = Transverse (flip H + rotate 90),
/// 8 = Rotated 270 CW
///
/// Any read failure, missing tag, short value, or out-of-spec value
/// returns the image unchanged.
fn auto_orient_from_exif(
    img: image::DynamicImage,
    path: &std::path::Path,
) -> image::DynamicImage {
    let metadata = match little_exif::metadata::Metadata::new_from_path(path) {
        Ok(m) => m,
        Err(_) => return img,
    };
    let endian = metadata.get_endian();
    let tag = match metadata.get_tag(&little_exif::exif_tag::ExifTag::Orientation(Vec::new())) {
        Some(t) => t,
        None => return img,
    };
    let raw = tag.value_as_u8_vec(endian);
    if raw.len() < 2 {
        return img;
    }
    // The orientation tag stores a 16-bit unsigned integer.
    let value = match endian {
        little_exif::endian::Endian::Little => u16::from_le_bytes([raw[0], raw[1]]),
        little_exif::endian::Endian::Big => u16::from_be_bytes([raw[0], raw[1]]),
    };
    match value {
        2 => img.fliph(),             // Flipped horizontal
        3 => img.rotate180(),         // Rotated 180
        4 => img.flipv(),             // Flipped vertical
        5 => img.fliph().rotate270(), // Transposed
        6 => img.rotate90(),          // Rotated 90 CW
        7 => img.fliph().rotate90(),  // Transverse
        8 => img.rotate270(),         // Rotated 270 CW
        // 1 (normal) and any unrecognized value: leave as-is.
        _ => img,
    }
}
/// Strip path separators and parent-directory components from a
/// user-provided filename, defending against path traversal via rename
/// templates (e.g. "../../etc/passwd" becomes "passwd").
///
/// Falls back to "output" when no usable file name component exists.
fn sanitize_filename(name: &str) -> String {
    let last_component = std::path::Path::new(name)
        .file_name()
        .and_then(|f| f.to_str());
    match last_component {
        Some(f) => f.to_string(),
        None => String::from("output"),
    }
}
/// Check if two paths refer to the same file (resolves symlinks and relative paths).
fn paths_are_same(a: &std::path::Path, b: &std::path::Path) -> bool {
    // Happy case: both paths exist and canonicalize fully.
    if let (Ok(ca), Ok(cb)) = (a.canonicalize(), b.canonicalize()) {
        return ca == cb;
    }
    // Canonicalize fails for paths that don't exist yet (e.g. the output
    // file), so canonicalize the parent directory and re-attach the name.
    let resolve = |p: &std::path::Path| -> Option<std::path::PathBuf> {
        let dir = p.parent()?.canonicalize().ok()?;
        Some(dir.join(p.file_name()?))
    };
    if let (Some(ra), Some(rb)) = (resolve(a), resolve(b)) {
        ra == rb
    } else {
        // Last resort: raw comparison of the OS strings.
        a.as_os_str() == b.as_os_str()
    }
}
/// Remove a 0-byte placeholder file created by find_unique_path on error.
/// Only truly empty files are ever deleted; anything with content is
/// treated as real output and left alone.
fn cleanup_placeholder(path: &std::path::Path) {
    let is_empty = std::fs::metadata(path)
        .map(|m| m.len() == 0)
        .unwrap_or(false);
    if is_empty {
        let _ = std::fs::remove_file(path);
    }
}
/// Find a non-colliding sibling of `path` by appending `_1`, `_2`, ….
///
/// Each candidate is atomically reserved on disk (O_CREAT | O_EXCL via
/// `create_new`) so concurrent auto-renaming threads cannot pick the same
/// name; a single marker byte is written so `cleanup_placeholder` (which
/// only removes 0-byte files) won't delete the reservation before the
/// real output is written.
fn find_unique_path(path: &std::path::Path) -> std::path::PathBuf {
    let stem = path
        .file_stem()
        .and_then(|s| s.to_str())
        .unwrap_or("output");
    let ext = path
        .extension()
        .and_then(|e| e.to_str())
        .unwrap_or("bin");
    let parent = path.parent().unwrap_or_else(|| std::path::Path::new("."));
    for n in 1u32..10000 {
        let candidate = parent.join(format!("{}_{}.{}", stem, n, ext));
        let created = std::fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(&candidate);
        if let Ok(mut file) = created {
            // Reservation succeeded; mark it non-empty and hand it out.
            let _ = std::io::Write::write_all(&mut file, b"~");
            return candidate;
        }
        // Name already taken (or transient error): try the next suffix.
    }
    // Extremely unlikely fallback - use timestamp for uniqueness
    let millis = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_millis())
        .unwrap_or(0);
    parent.join(format!("{}_{}.{}", stem, millis, ext))
}
/// Copy the EXIF block from `source` onto `output`.
/// Returns false when the source metadata cannot be read or the write fails.
fn copy_metadata_from_source(source: &std::path::Path, output: &std::path::Path) -> bool {
    match little_exif::metadata::Metadata::new_from_path(source) {
        Ok(metadata) => metadata.write_to_file(output).is_ok(),
        Err(_) => false,
    }
}
/// Overwrite the file's metadata with an empty EXIF block, removing all tags.
/// Returns false when the write fails.
fn strip_all_metadata(path: &std::path::Path) -> bool {
    little_exif::metadata::Metadata::new()
        .write_to_file(path)
        .is_ok()
}
/// Rewrite the EXIF block at `path`, dropping only the tag groups that
/// `config` marks for stripping and keeping everything else.
///
/// Returns false when the file's metadata cannot be read or written.
fn strip_selective_metadata(
    path: &std::path::Path,
    config: &crate::operations::MetadataConfig,
) -> bool {
    use little_exif::exif_tag::ExifTag;
    use little_exif::metadata::Metadata;
    // Re-read the metadata that was just written back to the output file.
    let source_meta = match Metadata::new_from_path(path) {
        Ok(m) => m,
        Err(_) => return false,
    };
    // Collect the numeric IDs of every tag this config wants removed.
    let mut strip_ids: Vec<u16> = Vec::new();
    if config.should_strip_gps() {
        // GPSInfo pointer (0x8825) - removing it strips the GPS sub-IFD reference
        strip_ids.push(ExifTag::GPSInfo(Vec::new()).as_u16());
    }
    if config.should_strip_camera() {
        strip_ids.extend([
            ExifTag::Make(String::new()).as_u16(),
            ExifTag::Model(String::new()).as_u16(),
            ExifTag::LensModel(String::new()).as_u16(),
            ExifTag::LensMake(String::new()).as_u16(),
            ExifTag::SerialNumber(String::new()).as_u16(),
            ExifTag::LensSerialNumber(String::new()).as_u16(),
            ExifTag::LensInfo(Vec::new()).as_u16(),
        ]);
    }
    if config.should_strip_software() {
        strip_ids.extend([
            ExifTag::Software(String::new()).as_u16(),
            ExifTag::MakerNote(Vec::new()).as_u16(),
        ]);
    }
    if config.should_strip_timestamps() {
        strip_ids.extend([
            ExifTag::ModifyDate(String::new()).as_u16(),
            ExifTag::DateTimeOriginal(String::new()).as_u16(),
            ExifTag::CreateDate(String::new()).as_u16(),
            ExifTag::SubSecTime(String::new()).as_u16(),
            ExifTag::SubSecTimeOriginal(String::new()).as_u16(),
            ExifTag::SubSecTimeDigitized(String::new()).as_u16(),
            ExifTag::OffsetTime(String::new()).as_u16(),
            ExifTag::OffsetTimeOriginal(String::new()).as_u16(),
            ExifTag::OffsetTimeDigitized(String::new()).as_u16(),
        ]);
    }
    if config.should_strip_copyright() {
        strip_ids.extend([
            ExifTag::Copyright(String::new()).as_u16(),
            ExifTag::Artist(String::new()).as_u16(),
            ExifTag::OwnerName(String::new()).as_u16(),
        ]);
    }
    // Rebuild the metadata block from the tags that survive the filter.
    let mut kept = Metadata::new();
    for tag in source_meta.data() {
        if !strip_ids.contains(&tag.as_u16()) {
            kept.set_tag(tag.clone());
        }
    }
    kept.write_to_file(path).is_ok()
}