diff --git a/pbs-config/src/acl.rs b/pbs-config/src/acl.rs
index 88345f6c..928a4121 100644
--- a/pbs-config/src/acl.rs
+++ b/pbs-config/src/acl.rs
@@ -337,7 +337,7 @@ impl AclTree {
     fn get_node(&self, path: &[&str]) -> Option<&AclTreeNode> {
         let mut node = &self.root;
         for outer in path {
-            for comp in outer.split("/") {
+            for comp in outer.split('/') {
                 node = match node.children.get(comp) {
                     Some(n) => n,
                     None => return None,
@@ -350,7 +350,7 @@ impl AclTree {
     fn get_node_mut(&mut self, path: &[&str]) -> Option<&mut AclTreeNode> {
         let mut node = &mut self.root;
         for outer in path {
-            for comp in outer.split("/") {
+            for comp in outer.split('/') {
                 node = match node.children.get_mut(comp) {
                     Some(n) => n,
                     None => return None,
@@ -363,7 +363,7 @@ impl AclTree {
     fn get_or_insert_node(&mut self, path: &[&str]) -> &mut AclTreeNode {
         let mut node = &mut self.root;
         for outer in path {
-            for comp in outer.split("/") {
+            for comp in outer.split('/') {
                 node = node.children.entry(String::from(comp)).or_default();
             }
         }
@@ -666,7 +666,7 @@ impl AclTree {
     pub fn get_child_paths(&self, auth_id: &Authid, path: &[&str]) -> Result<Vec<String>, Error> {
         let mut res = Vec::new();
 
-        if let Some(node) = self.get_node(&path) {
+        if let Some(node) = self.get_node(path) {
             let path = path.join("/");
             node.get_child_paths(path, auth_id, &mut res)?;
         }
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index a1c84b55..472fd5c3 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -486,7 +486,7 @@ fn backup_worker(
                     print_ns_and_snapshot(info.backup_dir.backup_ns(), info.backup_dir.as_ref());
                 if pool_writer.contains_snapshot(
                     datastore_name,
-                    &info.backup_dir.backup_ns(),
+                    info.backup_dir.backup_ns(),
                     info.backup_dir.as_ref(),
                 ) {
                     task_log!(worker, "skip snapshot {}", rel_path);
@@ -512,7 +512,7 @@ fn backup_worker(
 
             if pool_writer.contains_snapshot(
                 datastore_name,
-                &info.backup_dir.backup_ns(),
+                info.backup_dir.backup_ns(),
                 info.backup_dir.as_ref(),
             ) {
                 task_log!(worker, "skip snapshot {}", rel_path);
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index ef60380b..ba17bb6f 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -88,7 +88,7 @@ impl TryFrom<Vec<String>> for NamespaceMap {
 }
 
 impl NamespaceMap {
-    fn used_namespaces<'a>(&self, datastore: &str) -> HashSet<BackupNamespace> {
+    fn used_namespaces(&self, datastore: &str) -> HashSet<BackupNamespace> {
         let mut set = HashSet::new();
         if let Some(mapping) = self.map.get(datastore) {
             for (ns, _) in mapping.values() {
@@ -190,8 +190,8 @@ impl DataStoreMap {
     fn target_store(&self, source_datastore: &str) -> Option<Arc<DataStore>> {
         self.map
             .get(source_datastore)
-            .or_else(|| self.default.as_ref())
-            .map(|store| Arc::clone(store))
+            .or(self.default.as_ref())
+            .map(Arc::clone)
     }
 
     fn get_targets(
@@ -397,7 +397,7 @@ pub fn restore(
 
     let email = notify_user
         .as_ref()
-        .and_then(|userid| lookup_user_email(userid))
+        .and_then(lookup_user_email)
        .or_else(|| lookup_user_email(&auth_id.clone().into()));
 
     task_log!(worker, "Mediaset '{media_set}'");
@@ -406,7 +406,7 @@
     let res = if snapshots.is_some() || namespaces {
         restore_list_worker(
             worker.clone(),
-            snapshots.unwrap_or_else(Vec::new),
+            snapshots.unwrap_or_default(),
             inventory,
             media_set_uuid,
             drive_config,
@@ -521,7 +521,7 @@ fn restore_full_worker(
             &mut checked_chunks_map,
             restore_owner,
             &email,
-            &auth_id,
+            auth_id,
         )?;
     }
 
@@ -589,7 +589,7 @@ fn check_snapshot_restorable(
 
         have_some_permissions = true;
 
-        if datastore.snapshot_path(&ns, &dir).exists() {
+        if datastore.snapshot_path(&ns, dir).exists() {
             task_warn!(
                 worker,
                 "found snapshot {snapshot} on target datastore/namespace, skipping...",
@@ -603,7 +603,7 @@ fn check_snapshot_restorable(
         bail!("cannot restore {snapshot} to any target namespace due to permissions");
     }
 
-    return Ok(can_restore_some);
+    Ok(can_restore_some)
 }
 
 fn restore_list_worker(
@@ -670,13 +670,13 @@ fn restore_list_worker(
                 let (store, snapshot) = store_snapshot.split_at(idx + 1);
                 let store = &store[..idx]; // remove ':'
 
-                match parse_ns_and_snapshot(&snapshot) {
+                match parse_ns_and_snapshot(snapshot) {
                     Ok((ns, dir)) => {
                         match check_snapshot_restorable(
                             &worker,
                             &store_map,
-                            &store,
-                            &snapshot,
+                            store,
+                            snapshot,
                             &ns,
                             &dir,
                             true,
@@ -710,7 +710,7 @@ fn restore_list_worker(
                 None => bail!("unexpected error"), // we already checked those
             };
             let (media_id, file_num) =
-                if let Some((media_uuid, file_num)) = catalog.lookup_snapshot(store, &snapshot) {
+                if let Some((media_uuid, file_num)) = catalog.lookup_snapshot(store, snapshot) {
                     let media_id = inventory.lookup_media(media_uuid).unwrap();
                     (media_id, file_num)
                 } else {
@@ -926,7 +926,7 @@ fn restore_list_worker(
         }
 
         for (datastore, _) in store_map.used_datastores().values() {
-            let tmp_path = media_set_tmpdir(&datastore, &media_set_uuid);
+            let tmp_path = media_set_tmpdir(datastore, &media_set_uuid);
             match std::fs::remove_dir_all(&tmp_path) {
                 Ok(()) => {}
                 Err(err) if err.kind() == std::io::ErrorKind::NotFound => {}
@@ -1193,15 +1193,12 @@ fn restore_partial_chunk_archive<'a>(
 
     let verify_and_write_channel = writer_pool.channel();
 
-    loop {
-        let (digest, blob) = match decoder.next_chunk()? {
-            Some((digest, blob)) => (digest, blob),
-            None => break,
-        };
+    while let Some((digest, blob)) = decoder.next_chunk()? {
+
         worker.check_abort()?;
 
         if chunk_list.remove(&digest) {
-            verify_and_write_channel.send((blob, digest.clone()))?;
+            verify_and_write_channel.send((blob, digest))?;
             count += 1;
         }
         if chunk_list.is_empty() {
@@ -1326,7 +1323,7 @@ pub fn restore_media(
             &mut catalog,
             checked_chunks_map,
             verbose,
-            &auth_id,
+            auth_id,
         )?;
     }
 
@@ -1388,7 +1385,7 @@ fn restore_archive<'a>(
                     &user_info,
                     &datastore,
                     &backup_ns,
-                    &auth_id,
+                    auth_id,
                     Some(restore_owner),
                 )?;
                 let (owner, _group_lock) = datastore.create_locked_backup_group(
@@ -1483,7 +1480,7 @@ fn restore_archive<'a>(
                         .unwrap_or("_unused_")
                         .to_string(),
                 )
-                .or_insert(HashSet::new());
+                .or_default();
 
             let chunks = if let Some(datastore) = datastore {
                 restore_chunk_archive(
@@ -1649,8 +1646,8 @@ fn restore_chunk_archive<'a>(
                 worker.check_abort()?;
 
                 if !checked_chunks.contains(&digest) {
-                    verify_and_write_channel.send((blob, digest.clone()))?;
-                    checked_chunks.insert(digest.clone());
+                    verify_and_write_channel.send((blob, digest))?;
+                    checked_chunks.insert(digest);
                 }
                 chunks.push(digest);
             }
@@ -1884,11 +1881,10 @@ pub fn fast_catalog_restore(
             let wanted = media_set
                 .media_list()
                 .iter()
-                .find(|e| match e {
+                .any(|e| match e {
                     None => false,
                     Some(uuid) => uuid == catalog_uuid,
-                })
-                .is_some();
+                });
 
             if !wanted {
                 task_log!(
diff --git a/src/tape/changer/online_status_map.rs b/src/tape/changer/online_status_map.rs
index a8c2e5e6..5bfa3b3c 100644
--- a/src/tape/changer/online_status_map.rs
+++ b/src/tape/changer/online_status_map.rs
@@ -135,7 +135,7 @@ pub fn update_online_status(
 
     for mut changer_config in changers {
         if let Some(changer) = changer {
-            if changer != &changer_config.name {
+            if changer != changer_config.name {
                 continue;
             }
             found_changer = true;
@@ -158,7 +158,7 @@ pub fn update_online_status(
     let vtapes: Vec<VirtualTapeDrive> = config.convert_to_typed_array("virtual")?;
     for mut vtape in vtapes {
         if let Some(changer) = changer {
-            if changer != &vtape.name {
+            if changer != vtape.name {
                 continue;
             }
             found_changer = true;
diff --git a/src/tape/drive/lto/mod.rs b/src/tape/drive/lto/mod.rs
index 95b7ee1f..813e2179 100644
--- a/src/tape/drive/lto/mod.rs
+++ b/src/tape/drive/lto/mod.rs
@@ -49,7 +49,7 @@ pub fn open_lto_tape_drive(config: &LtoTapeDrive) -> Result<LtoTapeHandle, Error> {
 
         if count <= *pos {
-            *pos = *pos - count;
+            *pos -= count;
         } else {
             bail!("backward_space_count_files failed: move before BOT");
         }
 
diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs
index f66542a2..7fd007be 100644
--- a/src/tape/media_catalog.rs
+++ b/src/tape/media_catalog.rs
@@ -18,6 +18,7 @@ use pbs_api_types::{parse_ns_and_snapshot, print_ns_and_snapshot, BackupDir, BackupNamespace};
 
 use crate::tape::{file_formats::MediaSetLabel, MediaId};
 
+#[derive(Default)]
 pub struct DatastoreContent {
     pub snapshot_index: HashMap<String, u64>, // snapshot => file_nr
     pub chunk_index: HashMap<[u8; 32], u64>,  // chunk => file_nr
@@ -611,7 +612,7 @@ impl MediaCatalog {
 
         self.content
             .entry(store.to_string())
-            .or_insert(DatastoreContent::new());
+            .or_default();
 
         self.current_archive = Some((uuid, file_number, store.to_string()));
 
@@ -728,7 +729,7 @@ impl MediaCatalog {
 
         let content = self
             .content
            .entry(store.to_string())
-            .or_insert(DatastoreContent::new());
+            .or_default();
 
         content.snapshot_index.insert(path, file_number);
@@ -858,7 +859,7 @@ impl MediaCatalog {
 
         self.content
             .entry(store.to_string())
-            .or_insert(DatastoreContent::new());
+            .or_default();
 
         self.current_archive = Some((uuid, file_number, store.to_string()));
     }
@@ -895,7 +896,7 @@ impl MediaCatalog {
         let content = self
             .content
             .entry(store.to_string())
-            .or_insert(DatastoreContent::new());
+            .or_default();
 
         content
             .snapshot_index
@@ -1016,19 +1017,17 @@ impl MediaSetCatalog {
 
     pub fn list_snapshots(&self) -> impl Iterator<Item = (&str, &str)> {
         self.catalog_list
             .values()
-            .map(|catalog| {
+            .flat_map(|catalog| {
                 catalog
                     .content
                     .iter()
-                    .map(|(store, content)| {
+                    .flat_map(|(store, content)| {
                         content
                             .snapshot_index
                             .keys()
                             .map(move |key| (store.as_str(), key.as_str()))
                     })
-                    .flatten()
             })
-            .flatten()
     }
 }
diff --git a/src/tape/media_pool.rs b/src/tape/media_pool.rs
index fa99c95c..0593feb4 100644
--- a/src/tape/media_pool.rs
+++ b/src/tape/media_pool.rs
@@ -432,14 +432,14 @@ impl MediaPool {
             res
         });
 
-        free_media.pop().map(|e| e.clone())
+        free_media.pop().cloned()
     }
 
     // Get next empty media
     pub fn next_empty_media(&self, media_list: &[BackupMedia]) -> Option<MediaId> {
         let mut empty_media = Vec::new();
 
-        for media in media_list.into_iter() {
+        for media in media_list.iter() {
             if !self.location_is_available(media.location()) {
                 continue;
             }
@@ -472,7 +472,7 @@ impl MediaPool {
     ) -> Option<MediaId> {
         let mut expired_media = Vec::new();
 
-        for media in media_list.into_iter() {
+        for media in media_list.iter() {
             if !self.location_is_available(media.location()) {
                 continue;
             }
diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs
index d87f7dec..25123720 100644
--- a/src/tape/pool_writer/mod.rs
+++ b/src/tape/pool_writer/mod.rs
@@ -106,7 +106,7 @@ impl PoolWriter {
     pub fn get_used_media_labels(&self) -> Result<Vec<String>, Error> {
         let mut res = Vec::with_capacity(self.used_tapes.len());
         for media_uuid in &self.used_tapes {
-            let media_info = self.pool.lookup_media(&media_uuid)?;
+            let media_info = self.pool.lookup_media(media_uuid)?;
             res.push(media_info.label_text().to_string());
         }
 
@@ -271,7 +271,7 @@ impl PoolWriter {
 
         self.catalog_set.lock().unwrap().append_catalog(catalog)?;
 
-        let media_set = media.media_set_label().clone().unwrap();
+        let media_set = media.media_set_label().unwrap();
 
         let encrypt_fingerprint = media_set
             .encryption_key_fingerprint
@@ -390,7 +390,7 @@ impl PoolWriter {
     fn append_media_set_catalogs(&mut self, worker: &WorkerTask) -> Result<(), Error> {
         let media_set = self.pool.current_media_set();
 
-        let mut media_list = &media_set.media_list()[..];
+        let mut media_list = media_set.media_list();
         if media_list.len() < 2 {
             return Ok(());
         }