From 7e25b9aaaa6d6d63d3fa86265d56ef9ef1b0766f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= <f.gruenbichler@proxmox.com>
Date: Mon, 30 Nov 2020 16:27:21 +0100
Subject: [PATCH] verify: use same progress as pull
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

percentage of verified groups, interpolating based on snapshot count
within the group. in most cases, this will also be closer to 'real'
progress since added snapshots (those which will be verified) in active
backup groups will be roughly evenly distributed, while number of total
snapshots per group will be heavily skewed towards those groups which
have existed the longest, even though most of those old snapshots will
only be re-verified very infrequently.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 src/api2/admin/datastore.rs |  4 ++--
 src/backup/verify.rs        | 59 ++++++++++++++++++++++++++++++-------------------------------
 2 files changed, 28 insertions(+), 35 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index bce58a78..df60dab6 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -687,12 +687,12 @@ pub fn verify(
             }
             res
         } else if let Some(backup_group) = backup_group {
-            let (_count, failed_dirs) = verify_backup_group(
+            let failed_dirs = verify_backup_group(
                 datastore,
                 &backup_group,
                 verified_chunks,
                 corrupt_chunks,
-                None,
+                &mut StoreProgress::new(1),
                 worker.clone(),
                 worker.upid(),
                 None,
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 21a31a3f..7ba8d56a 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -10,6 +10,7 @@ use crate::{
     api2::types::*,
     backup::{
         DataStore,
+        StoreProgress,
         DataBlob,
         BackupGroup,
         BackupDir,
@@ -425,11 +426,11 @@ pub fn verify_backup_group(
     group: &BackupGroup,
     verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    progress: Option<(usize, usize)>, // (done, snapshot_count)
+    progress: &mut StoreProgress,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: &UPID,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
-) -> Result<(usize, Vec<String>), Error> {
+) -> Result<Vec<String>, Error> {
     let mut errors = Vec::new();
 
     let mut list = match group.list_backups(&datastore.base_path()) {
@@ -442,19 +443,17 @@
                 group,
                 err,
             );
-            return Ok((0, errors));
+            return Ok(errors);
         }
     };
 
-    task_log!(worker, "verify group {}:{}", datastore.name(), group);
+    let snapshot_count = list.len();
+    task_log!(worker, "verify group {}:{} ({} snapshots)", datastore.name(), group, snapshot_count);
 
-    let (done, snapshot_count) = progress.unwrap_or((0, list.len()));
+    progress.group_snapshots = snapshot_count as u64;
 
-    let mut count = 0;
     BackupInfo::sort_list(&mut list, false); // newest first
-    for info in list {
-        count += 1;
-
+    for (pos, info) in list.into_iter().enumerate() {
         if !verify_backup_dir(
             datastore.clone(),
             &info.backup_dir,
@@ -466,20 +465,15 @@
         )? {
             errors.push(info.backup_dir.to_string());
         }
-        if snapshot_count != 0 {
-            let pos = done + count;
-            let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
-            task_log!(
-                worker,
-                "percentage done: {:.2}% ({} of {} snapshots)",
-                percentage,
-                pos,
-                snapshot_count,
-            );
-        }
+        progress.done_snapshots = pos as u64 + 1;
+        task_log!(
+            worker,
+            "percentage done: {}",
+            progress
+        );
     }
 
-    Ok((count, errors))
+    Ok(errors)
 }
 
 /// Verify all (owned) backups inside a datastore
@@ -551,34 +545,33 @@
 
     list.sort_unstable();
 
-    let mut snapshot_count = 0;
-    for group in list.iter() {
-        snapshot_count += group.list_backups(&datastore.base_path())?.len();
-    }
-
     // start with 16384 chunks (up to 65GB)
     let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
 
     // start with 64 chunks since we assume there are few corrupt ones
     let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
 
-    task_log!(worker, "found {} snapshots", snapshot_count);
+    let group_count = list.len();
+    task_log!(worker, "found {} groups", group_count);
 
-    let mut done = 0;
-    for group in list {
-        let (count, mut group_errors) = verify_backup_group(
+    let mut progress = StoreProgress::new(group_count as u64);
+
+    for (pos, group) in list.into_iter().enumerate() {
+        progress.done_groups = pos as u64;
+        progress.done_snapshots = 0;
+        progress.group_snapshots = 0;
+
+        let mut group_errors = verify_backup_group(
             datastore.clone(),
             &group,
             verified_chunks.clone(),
             corrupt_chunks.clone(),
-            Some((done, snapshot_count)),
+            &mut progress,
             worker.clone(),
             upid,
             filter,
         )?;
         errors.append(&mut group_errors);
-
-        done += count;
     }
 
     Ok(errors)