diff --git a/pbs-config/src/prune.rs b/pbs-config/src/prune.rs
index e9a6ed58..0680d399 100644
--- a/pbs-config/src/prune.rs
+++ b/pbs-config/src/prune.rs
@@ -35,7 +35,7 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
 
 pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
     let content = proxmox_sys::fs::file_read_optional_string(PRUNE_CFG_FILENAME)?;
-    let content = content.unwrap_or_else(String::new);
+    let content = content.unwrap_or_default();
 
     let digest = openssl::sha::sha256(content.as_bytes());
     let data = CONFIG.parse(PRUNE_CFG_FILENAME, &content)?;
diff --git a/pbs-tools/src/format.rs b/pbs-tools/src/format.rs
index aeaa3e03..038362a0 100644
--- a/pbs-tools/src/format.rs
+++ b/pbs-tools/src/format.rs
@@ -19,7 +19,7 @@ pub fn render_backup_file_list<S: Borrow<str>>(files: &[S]) -> String {
         .map(|v| strip_server_file_extension(v.borrow()))
         .collect();
 
-    files.sort();
+    files.sort_unstable();
 
     files.join(" ")
 }
diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs
index e5048af4..d0c7b52f 100644
--- a/proxmox-backup-client/src/catalog.rs
+++ b/proxmox-backup-client/src/catalog.rs
@@ -163,7 +163,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let path = required_string_param(&param, "snapshot")?;
     let archive_name = required_string_param(&param, "archive-name")?;
 
-    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;
 
     let crypto = crypto_parameters(&param)?;
 
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index 12c598fe..ada555a2 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -176,7 +176,7 @@ pub async fn dir_or_last_from_group(
     match path.parse::<BackupPart>()? {
         BackupPart::Dir(dir) => Ok(dir),
         BackupPart::Group(group) => {
-            api_datastore_latest_snapshot(&client, repo.store(), ns, group).await
+            api_datastore_latest_snapshot(client, repo.store(), ns, group).await
         }
     }
 }
@@ -1245,7 +1245,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let ns = optional_ns_param(&param)?;
     let path = json::required_string_param(&param, "snapshot")?;
 
-    let backup_dir = dir_or_last_from_group(&client, &repo, &ns, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &ns, path).await?;
 
     let target = json::required_string_param(&param, "target")?;
     let target = if target == "-" { None } else { Some(target) };
diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs
index 760b05ac..268be580 100644
--- a/proxmox-backup-client/src/mount.rs
+++ b/proxmox-backup-client/src/mount.rs
@@ -205,7 +205,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
     let backup_ns = optional_ns_param(&param)?;
     let path = required_string_param(&param, "snapshot")?;
 
-    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;
 
     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
     let crypt_config = match keyfile {
diff --git a/proxmox-file-restore/src/block_driver.rs b/proxmox-file-restore/src/block_driver.rs
index 0b5face9..eb6de82c 100644
--- a/proxmox-file-restore/src/block_driver.rs
+++ b/proxmox-file-restore/src/block_driver.rs
@@ -204,7 +204,6 @@ pub fn complete_block_driver_ids(
     ALL_DRIVERS
         .iter()
         .map(BlockDriverType::resolve)
-        .map(|d| d.list())
-        .flatten()
+        .flat_map(|d| d.list())
         .collect()
 }
diff --git a/proxmox-rest-server/src/worker_task.rs b/proxmox-rest-server/src/worker_task.rs
index b60282f8..a1eea832 100644
--- a/proxmox-rest-server/src/worker_task.rs
+++ b/proxmox-rest-server/src/worker_task.rs
@@ -262,10 +262,8 @@ pub fn rotate_task_log_archive(
                     }
                 }
             }
-        } else {
-            if let Err(err) = std::fs::remove_file(&file_name) {
-                log::error!("could not remove {:?}: {}", file_name, err);
-            }
+        } else if let Err(err) = std::fs::remove_file(&file_name) {
+            log::error!("could not remove {:?}: {}", file_name, err);
         }
     }
 }
@@ -966,7 +964,7 @@ impl WorkerTask {
 
     /// Set progress indicator
    pub fn progress(&self, progress: f64) {
-        if progress >= 0.0 && progress <= 1.0 {
+        if (0.0..=1.0).contains(&progress) {
             let mut data = self.data.lock().unwrap();
             data.progress = progress;
         } else {
diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs
index 1957b143..a3b7cfa4 100644
--- a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs
+++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs
@@ -107,14 +107,14 @@ impl Bucket {
             Bucket::RawFs(_) => ty == "raw",
             Bucket::ZPool(data) => {
                 if let Some(ref comp) = comp.get(0) {
-                    ty == "zpool" && comp.as_ref() == &data.name
+                    ty == "zpool" && comp.as_ref() == data.name
                 } else {
                     false
                 }
             }
             Bucket::LVM(data) => {
                 if let (Some(ref vg), Some(ref lv)) = (comp.get(0), comp.get(1)) {
-                    ty == "lvm" && vg.as_ref() == &data.vg_name && lv.as_ref() == &data.lv_name
+                    ty == "lvm" && vg.as_ref() == data.vg_name && lv.as_ref() == data.lv_name
                 } else {
                     false
                 }
diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs
index 6a13f38a..78ec31c8 100644
--- a/pxar-bin/src/main.rs
+++ b/pxar-bin/src/main.rs
@@ -147,7 +147,7 @@ fn extract_archive(
         feature_flags.remove(Flags::WITH_SOCKETS);
     }
 
-    let pattern = pattern.unwrap_or_else(Vec::new);
+    let pattern = pattern.unwrap_or_default();
     let target = target.as_ref().map_or_else(|| ".", String::as_str);
 
     let mut match_list = Vec::new();
@@ -297,7 +297,7 @@ async fn create_archive(
     entries_max: isize,
 ) -> Result<(), Error> {
     let patterns = {
-        let input = exclude.unwrap_or_else(Vec::new);
+        let input = exclude.unwrap_or_default();
         let mut patterns = Vec::with_capacity(input.len());
         for entry in input {
             patterns.push(
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 952fe2e0..44208a4c 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -1557,7 +1557,7 @@ pub fn catalog(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     let file_name = CATALOG_NAME;
 
@@ -1939,7 +1939,7 @@ pub fn get_notes(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     let (manifest, _) = backup_dir.load_manifest()?;
 
@@ -1992,7 +1992,7 @@ pub fn set_notes(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     backup_dir
         .update_manifest(|manifest| {
@@ -2042,7 +2042,7 @@ pub fn get_protection(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     Ok(backup_dir.is_protected())
 }
@@ -2090,7 +2090,7 @@ pub fn set_protection(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
+    let backup_dir = datastore.backup_dir(ns, backup_dir)?;
 
     datastore.update_protection(&backup_dir, protected)
 }
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index ffc3696d..6f08474e 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -230,7 +230,7 @@ pub fn do_tape_backup_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index b45c3456..aadd7c93 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -837,10 +837,7 @@ async fn schedule_task_log_rotate() {
         if !check_schedule(worker_type, schedule, job_id) {
             // if we never ran the rotation, schedule instantly
             match jobstate::JobState::load(worker_type, job_id) {
-                Ok(state) => match state {
-                    jobstate::JobState::Created { .. } => {}
-                    _ => return,
-                },
+                Ok(jobstate::JobState::Created { .. }) => {}
                 _ => return,
             }
         }
     }
@@ -1183,10 +1180,6 @@ fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &st
 }
 
 // Rate Limiter lookup
-
-// Test WITH
-// proxmox-backup-client restore vm/201/2021-10-22T09:55:56Z drive-scsi0.img img1.img --repository localhost:store2
-
 async fn run_traffic_control_updater() {
     loop {
         let delay_target = Instant::now() + Duration::from_secs(1);
diff --git a/src/bin/proxmox_backup_debug/recover.rs b/src/bin/proxmox_backup_debug/recover.rs
index 23366b86..43ece96a 100644
--- a/src/bin/proxmox_backup_debug/recover.rs
+++ b/src/bin/proxmox_backup_debug/recover.rs
@@ -3,7 +3,6 @@ use std::io::{Read, Seek, SeekFrom, Write};
 use std::path::Path;
 
 use anyhow::{bail, format_err, Error};
-use serde_json::Value;
 
 use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
 use proxmox_schema::api;
@@ -69,7 +68,6 @@ fn recover_index(
     ignore_missing_chunks: bool,
     ignore_corrupt_chunks: bool,
     output_path: Option<String>,
-    _param: Value,
 ) -> Result<(), Error> {
     let file_path = Path::new(&file);
     let chunks_path = Path::new(&chunks);
@@ -150,7 +148,7 @@ fn recover_index(
             }
             Err(err) => {
                 if ignore_missing_chunks && err.kind() == std::io::ErrorKind::NotFound {
-                    create_zero_chunk(format!("is missing"))?
+                    create_zero_chunk("is missing".to_string())?
                 } else {
                     bail!("could not open chunk file - {}", err);
                 }
diff --git a/src/server/gc_job.rs b/src/server/gc_job.rs
index 693f9fd5..1b859ef0 100644
--- a/src/server/gc_job.rs
+++ b/src/server/gc_job.rs
@@ -42,7 +42,7 @@ pub fn do_garbage_collection_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs
index 2a73f017..35352e06 100644
--- a/src/server/prune_job.rs
+++ b/src/server/prune_job.rs
@@ -58,7 +58,7 @@ pub fn prune_datastore(
 
     for group in ListAccessibleBackupGroups::new_with_privs(
         &datastore,
-        ns.clone(),
+        ns,
         max_depth,
         Some(PRIV_DATASTORE_MODIFY), // overides the owner check
         Some(PRIV_DATASTORE_PRUNE), // additionally required if owner
@@ -190,7 +190,7 @@ pub fn do_prune_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index 64584e3d..778a6935 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -77,7 +77,7 @@ pub fn do_verification_job(
             if let Err(err) = job.finish(status) {
                 eprintln!(
                     "could not finish job state for {}: {}",
-                    job.jobtype().to_string(),
+                    job.jobtype(),
                     err
                 );
             }
diff --git a/src/tape/drive/mod.rs b/src/tape/drive/mod.rs
index d72021c1..8feba353 100644
--- a/src/tape/drive/mod.rs
+++ b/src/tape/drive/mod.rs
@@ -458,7 +458,7 @@ pub fn request_and_load_media(
                 let label_string = format!(
                     "{} ({})",
                     media_id.label.label_text,
-                    media_id.label.uuid.to_string(),
+                    media_id.label.uuid,
                 );
                 TapeRequestError::WrongLabel(label_string)
             }
diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs
index 38fdcc6e..ba17f066 100644
--- a/src/tape/media_catalog.rs
+++ b/src/tape/media_catalog.rs
@@ -499,7 +499,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("L|{}|{}", file_number, uuid.to_string());
+            println!("L|{}|{}", file_number, uuid);
         }
 
         self.pending.push(b'L');
@@ -599,7 +599,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("A|{}|{}|{}", file_number, uuid.to_string(), store);
+            println!("A|{}|{}|{}", file_number, uuid, store);
         }
 
         self.pending.push(b'A');
@@ -648,7 +648,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("E|{}|{}\n", file_number, uuid.to_string());
+            println!("E|{}|{}\n", file_number, uuid);
         }
 
         self.pending.push(b'E');
@@ -713,7 +713,7 @@ impl MediaCatalog {
         };
 
         if self.log_to_stdout {
-            println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, path,);
+            println!("S|{}|{}|{}:{}", file_number, uuid, store, path,);
         }
 
         self.pending.push(b'S');
diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs
index b2ff8620..0a2e45fb 100644
--- a/src/tape/pool_writer/mod.rs
+++ b/src/tape/pool_writer/mod.rs
@@ -449,7 +449,7 @@ impl PoolWriter {
         self.catalog_set.lock().unwrap().register_snapshot(
             content_uuid,
             current_file_number,
-            &snapshot_reader.datastore_name().to_string(),
+            snapshot_reader.datastore_name(),
             snapshot_reader.snapshot().backup_ns(),
             snapshot_reader.snapshot().as_ref(),
         )?;
diff --git a/src/tools/apt.rs b/src/tools/apt.rs
index c638f570..58cd605b 100644
--- a/src/tools/apt.rs
+++ b/src/tools/apt.rs
@@ -132,7 +132,7 @@ fn get_changelog_url(
         Some(captures) => {
             let base_capture = captures.get(1);
             match base_capture {
-                Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
+                Some(base_underscore) => base_underscore.as_str().replace('_', "/"),
                 None => bail!("incompatible filename, cannot find regex group"),
             }
         }
diff --git a/src/tools/subscription.rs b/src/tools/subscription.rs
index 01a9a70e..14b95f30 100644
--- a/src/tools/subscription.rs
+++ b/src/tools/subscription.rs
@@ -278,7 +278,7 @@ pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {
     };
 
     let encoded: String = cfg.collect::<String>();
-    let decoded = base64::decode(encoded.to_owned())?;
+    let decoded = base64::decode(&encoded)?;
     let decoded = std::str::from_utf8(&decoded)?;
     let info: SubscriptionInfo = serde_json::from_str(decoded)?;