diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs
index 60137d2a..7ee64fb0 100644
--- a/pbs-api-types/src/userid.rs
+++ b/pbs-api-types/src/userid.rs
@@ -556,10 +556,7 @@ impl Authid {
     }
 
     pub fn tokenname(&self) -> Option<&TokennameRef> {
-        match &self.tokenname {
-            Some(name) => Some(&name),
-            None => None,
-        }
+        self.tokenname.as_deref()
     }
 
     /// Get the "root@pam" auth id.
diff --git a/pbs-client/src/backup_repo.rs b/pbs-client/src/backup_repo.rs
index dc9b8ec8..2fae92ce 100644
--- a/pbs-client/src/backup_repo.rs
+++ b/pbs-client/src/backup_repo.rs
@@ -37,7 +37,7 @@ impl BackupRepository {
             return auth_id;
         }
 
-        &Authid::root_auth_id()
+        Authid::root_auth_id()
     }
 
     pub fn user(&self) -> &Userid {
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 000b7eb7..b02798bd 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -321,7 +321,7 @@ impl BackupWriter {
             self.h2.clone(),
             wid,
             stream,
-            &prefix,
+            prefix,
             known_chunks.clone(),
             if options.encrypt {
                 self.crypt_config.clone()
diff --git a/pbs-client/src/catalog_shell.rs b/pbs-client/src/catalog_shell.rs
index 007d2346..c9c9da67 100644
--- a/pbs-client/src/catalog_shell.rs
+++ b/pbs-client/src/catalog_shell.rs
@@ -529,7 +529,7 @@ impl Shell {
         };
 
         let new_stack =
-            Self::lookup(&stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
+            Self::lookup(stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
 
         *stack = new_stack;
 
@@ -993,7 +993,7 @@ impl Shell {
             &mut self.catalog,
             dir_stack,
             extractor,
-            &match_list,
+            match_list,
             &self.accessor,
         )?;
 
@@ -1118,7 +1118,7 @@ impl<'a> ExtractorState<'a> {
                 self.path_len_stack.push(self.path_len);
                 self.path_len = self.path.len();
 
-                Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+                Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
                 let dir_pxar = self.dir_stack.last().unwrap().pxar.as_ref().unwrap();
                 let dir_meta = dir_pxar.entry().metadata().clone();
                 let create = self.matches && match_result != Some(MatchType::Exclude);
@@ -1141,7 +1141,7 @@ impl<'a> ExtractorState<'a> {
             }
             (true, DirEntryAttribute::File { .. }) => {
                 self.dir_stack.push(PathStackEntry::new(entry));
-                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+                let file = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
                 self.extract_file(file).await?;
                 self.dir_stack.pop();
             }
@@ -1153,7 +1153,7 @@ impl<'a> ExtractorState<'a> {
             | (true, DirEntryAttribute::Hardlink) => {
                 let attr = entry.attr.clone();
                 self.dir_stack.push(PathStackEntry::new(entry));
-                let file = Shell::walk_pxar_archive(&self.accessor, &mut self.dir_stack).await?;
+                let file = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
                 self.extract_special(file, attr).await?;
                 self.dir_stack.pop();
             }
diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs
index 92ffeadf..9f3b0576 100644
--- a/pbs-client/src/pxar/create.rs
+++ b/pbs-client/src/pxar/create.rs
@@ -547,7 +547,7 @@ impl Archiver {
             None => return Ok(()),
         };
 
-        let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags)?;
+        let metadata = get_metadata(fd.as_raw_fd(), stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags)?;
 
         if self
             .patterns
@@ -629,14 +629,14 @@ impl Archiver {
                     catalog.lock().unwrap().add_block_device(c_file_name)?;
                 }
 
-                self.add_device(encoder, file_name, &metadata, &stat).await
+                self.add_device(encoder, file_name, &metadata, stat).await
             }
             mode::IFCHR => {
                 if let Some(ref catalog) = self.catalog {
                     catalog.lock().unwrap().add_char_device(c_file_name)?;
                 }
 
-                self.add_device(encoder, file_name, &metadata, &stat).await
+                self.add_device(encoder, file_name, &metadata, stat).await
             }
             other => bail!(
                 "encountered unknown file type: 0x{:x} (0o{:o})",
@@ -656,7 +656,7 @@ impl Archiver {
     ) -> Result<(), Error> {
         let dir_name = OsStr::from_bytes(dir_name.to_bytes());
 
-        let mut encoder = encoder.create_directory(dir_name, &metadata).await?;
+        let mut encoder = encoder.create_directory(dir_name, metadata).await?;
 
         let old_fs_magic = self.fs_magic;
         let old_fs_feature_flags = self.fs_feature_flags;
@@ -820,17 +820,17 @@ fn get_xattr_fcaps_acl(
     };
 
     for attr in &xattrs {
-        if xattr::is_security_capability(&attr) {
+        if xattr::is_security_capability(attr) {
             get_fcaps(meta, fd, flags, fs_feature_flags)?;
             continue;
         }
 
-        if xattr::is_acl(&attr) {
+        if xattr::is_acl(attr) {
             get_acl(meta, proc_path, flags, fs_feature_flags)?;
             continue;
         }
 
-        if !xattr::is_valid_xattr_name(&attr) {
+        if !xattr::is_valid_xattr_name(attr) {
             continue;
         }
diff --git a/pbs-client/src/pxar/fuse.rs b/pbs-client/src/pxar/fuse.rs
index 89847c0a..b039c6bd 100644
--- a/pbs-client/src/pxar/fuse.rs
+++ b/pbs-client/src/pxar/fuse.rs
@@ -649,7 +649,7 @@ impl SessionImpl {
 #[inline]
 fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
-    to_entry_param(to_inode(&entry), &entry)
+    to_entry_param(to_inode(entry), entry)
 }
 
 #[inline]
diff --git a/pbs-client/src/remote_chunk_reader.rs b/pbs-client/src/remote_chunk_reader.rs
index ed7cda2d..dff0ae39 100644
--- a/pbs-client/src/remote_chunk_reader.rs
+++ b/pbs-client/src/remote_chunk_reader.rs
@@ -50,7 +50,7 @@ impl RemoteChunkReader {
         let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
 
         self.client
-            .download_chunk(&digest, &mut chunk_data)
+            .download_chunk(digest, &mut chunk_data)
             .await?;
 
         let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
diff --git a/pbs-client/src/task_log.rs b/pbs-client/src/task_log.rs
index f97b9823..0e55a34a 100644
--- a/pbs-client/src/task_log.rs
+++ b/pbs-client/src/task_log.rs
@@ -110,7 +110,7 @@ pub async fn view_task_result(
             display_task_log(client, upid, true).await?;
         }
     } else {
-        format_and_print_result(&data, &output_format);
+        format_and_print_result(data, output_format);
     }
 
     Ok(())
diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs
index 92562068..3e297758 100644
--- a/pbs-client/src/tools/mod.rs
+++ b/pbs-client/src/tools/mod.rs
@@ -319,7 +319,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
 pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     complete_server_file_name(arg, param)
         .iter()
-        .map(|v| pbs_tools::format::strip_server_file_extension(&v).to_owned())
+        .map(|v| pbs_tools::format::strip_server_file_extension(v).to_owned())
         .collect()
 }
diff --git a/pbs-config/src/cached_user_info.rs b/pbs-config/src/cached_user_info.rs
index 24390f50..a7382d55 100644
--- a/pbs-config/src/cached_user_info.rs
+++ b/pbs-config/src/cached_user_info.rs
@@ -109,7 +109,7 @@ impl CachedUserInfo {
         required_privs: u64,
         partial: bool,
     ) -> Result<(), Error> {
-        let privs = self.lookup_privs(&auth_id, path);
+        let privs = self.lookup_privs(auth_id, path);
         let allowed = if partial {
             (privs & required_privs) != 0
         } else {
diff --git a/pbs-config/src/datastore.rs b/pbs-config/src/datastore.rs
index b47b05e2..12071a9f 100644
--- a/pbs-config/src/datastore.rs
+++ b/pbs-config/src/datastore.rs
@@ -45,7 +45,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 }
 
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(DATASTORE_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(DATASTORE_CFG_FILENAME, config)?;
     replace_backup_config(DATASTORE_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-config/src/domains.rs b/pbs-config/src/domains.rs
index 7b6b3768..90a83bc6 100644
--- a/pbs-config/src/domains.rs
+++ b/pbs-config/src/domains.rs
@@ -46,7 +46,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 }
 
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(DOMAINS_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(DOMAINS_CFG_FILENAME, config)?;
     replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-config/src/drive.rs b/pbs-config/src/drive.rs
index c37bf673..13be8841 100644
--- a/pbs-config/src/drive.rs
+++ b/pbs-config/src/drive.rs
@@ -81,7 +81,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 /// Save the configuration file
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(DRIVE_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(DRIVE_CFG_FILENAME, config)?;
     replace_backup_config(DRIVE_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-config/src/key_config.rs b/pbs-config/src/key_config.rs
index 47a096dd..cb8f1701 100644
--- a/pbs-config/src/key_config.rs
+++ b/pbs-config/src/key_config.rs
@@ -40,7 +40,7 @@ impl KeyDerivationConfig {
                 // estimated scrypt memory usage is 128*r*n*p
                 openssl::pkcs5::scrypt(
                     passphrase,
-                    &salt,
+                    salt,
                     *n, *r, *p,
                     1025*1024*1024,
                     &mut key,
@@ -52,7 +52,7 @@ impl KeyDerivationConfig {
                 openssl::pkcs5::pbkdf2_hmac(
                     passphrase,
-                    &salt,
+                    salt,
                     *iter,
                     openssl::hash::MessageDigest::sha256(),
                     &mut key,
@@ -235,10 +235,10 @@ impl KeyConfig {
         openssl::symm::decrypt_aead(
             cipher,
             &derived_key,
-            Some(&iv),
+            Some(iv),
             b"",
-            &enc_data,
-            &tag,
+            enc_data,
+            tag,
         ).map_err(|err| {
             match self.hint {
                 Some(ref hint) => {
diff --git a/pbs-config/src/media_pool.rs b/pbs-config/src/media_pool.rs
index c07af123..35a2924e 100644
--- a/pbs-config/src/media_pool.rs
+++ b/pbs-config/src/media_pool.rs
@@ -59,7 +59,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 /// Save the configuration file
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(MEDIA_POOL_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(MEDIA_POOL_CFG_FILENAME, config)?;
     replace_backup_config(MEDIA_POOL_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-config/src/network/helper.rs b/pbs-config/src/network/helper.rs
index 58d6339e..ef6149cf 100644
--- a/pbs-config/src/network/helper.rs
+++ b/pbs-config/src/network/helper.rs
@@ -95,7 +95,7 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
         ).unwrap();
     }
 
-    if let Some(caps) = CIDR_V4_REGEX.captures(&cidr) {
+    if let Some(caps) = CIDR_V4_REGEX.captures(cidr) {
         let address = &caps[1];
         if let Some(mask) = caps.get(2) {
             let mask = u8::from_str_radix(mask.as_str(), 10)?;
@@ -104,7 +104,7 @@ pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), E
         } else {
             Ok((address.to_string(), None, false))
         }
-    } else if let Some(caps) = CIDR_V6_REGEX.captures(&cidr) {
+    } else if let Some(caps) = CIDR_V6_REGEX.captures(cidr) {
         let address = &caps[1];
         if let Some(mask) = caps.get(2) {
             let mask = u8::from_str_radix(mask.as_str(), 10)?;
diff --git a/pbs-config/src/remote.rs b/pbs-config/src/remote.rs
index a90aa13b..921410e2 100644
--- a/pbs-config/src/remote.rs
+++ b/pbs-config/src/remote.rs
@@ -46,7 +46,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 }
 
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(REMOTE_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(REMOTE_CFG_FILENAME, config)?;
     crate::replace_backup_config(REMOTE_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-config/src/sync.rs b/pbs-config/src/sync.rs
index 23cf4607..f515613f 100644
--- a/pbs-config/src/sync.rs
+++ b/pbs-config/src/sync.rs
@@ -47,7 +47,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 }
 
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(SYNC_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(SYNC_CFG_FILENAME, config)?;
     replace_backup_config(SYNC_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-config/src/tape_job.rs b/pbs-config/src/tape_job.rs
index 82eda8e1..992d4382 100644
--- a/pbs-config/src/tape_job.rs
+++ b/pbs-config/src/tape_job.rs
@@ -45,7 +45,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 }
 
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(TAPE_JOB_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(TAPE_JOB_CFG_FILENAME, config)?;
     replace_backup_config(TAPE_JOB_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-config/src/token_shadow.rs b/pbs-config/src/token_shadow.rs
index a0b08550..1f46b33d 100644
--- a/pbs-config/src/token_shadow.rs
+++ b/pbs-config/src/token_shadow.rs
@@ -58,7 +58,7 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
     let data = read_file()?;
     match data.get(tokenid) {
         Some(hashed_secret) => {
-            proxmox_sys::crypt::verify_crypt_pw(secret, &hashed_secret)
+            proxmox_sys::crypt::verify_crypt_pw(secret, hashed_secret)
         },
         None => bail!("invalid API token"),
     }
diff --git a/pbs-config/src/traffic_control.rs b/pbs-config/src/traffic_control.rs
index ac887069..860d0fb7 100644
--- a/pbs-config/src/traffic_control.rs
+++ b/pbs-config/src/traffic_control.rs
@@ -54,7 +54,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 /// Save the configuration file
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(TRAFFIC_CONTROL_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(TRAFFIC_CONTROL_CFG_FILENAME, config)?;
     replace_backup_config(TRAFFIC_CONTROL_CFG_FILENAME, raw.as_bytes())?;
 
     // increase traffic control version
@@ -88,7 +88,7 @@ mod test {
 timeframe mon..wed 8:00-16:30
 timeframe fri 9:00-12:00
 ";
-        let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, &content)?;
+        let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, content)?;
         eprintln!("GOT {:?}", data);
 
         Ok(())
diff --git a/pbs-config/src/user.rs b/pbs-config/src/user.rs
index ada1cd8d..f62d45db 100644
--- a/pbs-config/src/user.rs
+++ b/pbs-config/src/user.rs
@@ -117,7 +117,7 @@ pub fn cached_config() -> Result<Arc<SectionConfigData>, Error> {
 }
 
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(USER_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(USER_CFG_FILENAME, config)?;
     replace_backup_config(USER_CFG_FILENAME, raw.as_bytes())?;
 
     // increase user version
diff --git a/pbs-config/src/verify.rs b/pbs-config/src/verify.rs
index 85e4bbfc..b6c70caa 100644
--- a/pbs-config/src/verify.rs
+++ b/pbs-config/src/verify.rs
@@ -46,7 +46,7 @@ pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
 }
 
 pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
-    let raw = CONFIG.write(VERIFICATION_CFG_FILENAME, &config)?;
+    let raw = CONFIG.write(VERIFICATION_CFG_FILENAME, config)?;
     replace_backup_config(VERIFICATION_CFG_FILENAME, raw.as_bytes())
 }
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 81933b08..aceeec97 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -160,7 +160,7 @@ impl BackupGroup {
 
     pub fn matches(&self, filter: &GroupFilter) -> bool {
         match filter {
-            GroupFilter::Group(backup_group) => match BackupGroup::from_str(&backup_group) {
+            GroupFilter::Group(backup_group) => match BackupGroup::from_str(backup_group) {
                 Ok(group) => &group == self,
                 Err(_) => false, // shouldn't happen if value is schema-checked
             },
diff --git a/pbs-datastore/src/catalog.rs b/pbs-datastore/src/catalog.rs
index 02f3e41e..22454921 100644
--- a/pbs-datastore/src/catalog.rs
+++ b/pbs-datastore/src/catalog.rs
@@ -506,7 +506,7 @@ impl<R: Read + Seek> CatalogReader<R> {
             if let Some(entry) = self.lookup(&current, comp)? {
                 current = entry;
             } else {
-                bail!("path {:?} not found in catalog", String::from_utf8_lossy(&path));
+                bail!("path {:?} not found in catalog", String::from_utf8_lossy(path));
             }
         }
         Ok(current)
@@ -612,7 +612,7 @@ impl<R: Read + Seek> CatalogReader<R> {
             file_path.extend(&e.name);
             match match_list.matches(&file_path, e.get_file_mode()) {
                 Some(MatchType::Exclude) => continue,
-                Some(MatchType::Include) => callback(&file_path)?,
+                Some(MatchType::Include) => callback(file_path)?,
                 None => (),
             }
             if is_dir {
diff --git a/pbs-datastore/src/data_blob.rs b/pbs-datastore/src/data_blob.rs
index 7da461e0..5f02abe7 100644
--- a/pbs-datastore/src/data_blob.rs
+++ b/pbs-datastore/src/data_blob.rs
@@ -123,7 +123,7 @@ impl DataBlob {
                 raw_data.write_le_value(dummy_head)?;
             }
 
-            let (iv, tag) = Self::encrypt_to(&config, data, &mut raw_data)?;
+            let (iv, tag) = Self::encrypt_to(config, data, &mut raw_data)?;
 
             let head = EncryptedDataBlobHeader {
                 head: DataBlobHeader { magic, crc: [0; 4] }, iv, tag,
@@ -491,7 +491,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
     fn compute_digest(&mut self) {
         if !self.digest_computed {
-            if let Some(ref config) = self.config {
+            if let Some(config) = self.config {
                 self.digest = config.compute_digest(self.orig_data);
             } else {
                 self.digest = openssl::sha::sha256(self.orig_data);
@@ -531,7 +531,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
     ) -> Result<(DataBlob, [u8; 32]), Error> {
         let zero_bytes = vec![0; chunk_size];
         let mut chunk_builder = DataChunkBuilder::new(&zero_bytes).compress(compress);
-        if let Some(ref crypt_config) = crypt_config {
+        if let Some(crypt_config) = crypt_config {
             chunk_builder = chunk_builder.crypt_config(crypt_config);
         }
 
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 5a97933e..fb262fdc 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -839,7 +839,7 @@ impl DataStore {
     ) -> Result<(), Error> {
         let _guard = self.lock_manifest(backup_dir)?;
 
-        let (mut manifest, _) = self.load_manifest(&backup_dir)?;
+        let (mut manifest, _) = self.load_manifest(backup_dir)?;
 
         update_fn(&mut manifest);
 
@@ -919,7 +919,7 @@ impl DataStore {
         }
 
         // sorting by inode improves data locality, which makes it lots faster on spinners
-        chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));
+        chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b));
 
         Ok(chunk_list)
     }
diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
index 58e8aba7..42b4bf29 100644
--- a/pbs-datastore/src/manifest.rs
+++ b/pbs-datastore/src/manifest.rs
@@ -214,7 +214,7 @@ impl BackupManifest {
         let json: Value = serde_json::from_slice(data)?;
         let signature = json["signature"].as_str().map(String::from);
 
-        if let Some(ref crypt_config) = crypt_config {
+        if let Some(crypt_config) = crypt_config {
             if let Some(signature) = signature {
                 let expected_signature = hex::encode(&Self::json_signature(&json, crypt_config)?);
diff --git a/pbs-datastore/src/paperkey.rs b/pbs-datastore/src/paperkey.rs
index f9c4001c..8caca0b9 100644
--- a/pbs-datastore/src/paperkey.rs
+++ b/pbs-datastore/src/paperkey.rs
@@ -53,7 +53,7 @@ pub fn generate_paper_key(
         (lines, true)
     } else {
-        match serde_json::from_str::<KeyConfig>(&data) {
+        match serde_json::from_str::<KeyConfig>(data) {
             Ok(key_config) => {
                 let lines = serde_json::to_string_pretty(&key_config)?
                     .lines()
                     .
@@ -216,7 +216,7 @@ fn paperkey_text(
     }
     writeln!(output, "-----END PROXMOX BACKUP KEY-----")?;
 
-    let qr_code = generate_qr_code("utf8i", &lines)?;
+    let qr_code = generate_qr_code("utf8i", lines)?;
     let qr_code = String::from_utf8(qr_code)
         .map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
diff --git a/pbs-datastore/src/prune.rs b/pbs-datastore/src/prune.rs
index f968e718..09448f6f 100644
--- a/pbs-datastore/src/prune.rs
+++ b/pbs-datastore/src/prune.rs
@@ -44,7 +44,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
     for info in list {
         let backup_id = info.backup_dir.relative_path();
         if let Some(PruneMark::Keep) = mark.get(&backup_id) {
-            let sel_id: String = select_id(&info)?;
+            let sel_id: String = select_id(info)?;
             already_included.insert(sel_id);
         }
     }
@@ -56,7 +56,7 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
             mark.insert(backup_id, PruneMark::Protected);
             continue;
         }
-        let sel_id: String = select_id(&info)?;
+        let sel_id: String = select_id(info)?;
         if already_included.contains(&sel_id) { continue; }
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index 7e2ddf5b..18bc0d83 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -89,7 +89,7 @@ impl SnapshotReader {
 
     /// Returns an iterator for all used chunks.
     pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> {
-        SnapshotChunkIterator::new(&self)
+        SnapshotChunkIterator::new(self)
     }
 }
diff --git a/pbs-tape/src/bin/pmt.rs b/pbs-tape/src/bin/pmt.rs
index 281c0df0..8f61c322 100644
--- a/pbs-tape/src/bin/pmt.rs
+++ b/pbs-tape/src/bin/pmt.rs
@@ -62,14 +62,14 @@ fn get_tape_handle(param: &Value) -> Result<SgTape, Error> {
 
     if let Some(name) = param["drive"].as_str() {
         let (config, _digest) = pbs_config::drive::config()?;
-        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", name)?;
         eprintln!("using device {}", drive.path);
         return SgTape::new(open_lto_tape_device(&drive.path)?);
     }
 
     if let Some(device) = param["device"].as_str() {
         eprintln!("using device {}", device);
-        return SgTape::new(open_lto_tape_device(&device)?);
+        return SgTape::new(open_lto_tape_device(device)?);
     }
 
     if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
@@ -94,7 +94,7 @@ fn get_tape_handle(param: &Value) -> Result<SgTape, Error> {
 
     if drive_names.len() == 1 {
         let name = drive_names[0];
-        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", name)?;
         eprintln!("using device {}", drive.path);
         return SgTape::new(open_lto_tape_device(&drive.path)?);
     }
diff --git a/pbs-tape/src/bin/pmtx.rs b/pbs-tape/src/bin/pmtx.rs
index 7191060b..c8682c3f 100644
--- a/pbs-tape/src/bin/pmtx.rs
+++ b/pbs-tape/src/bin/pmtx.rs
@@ -36,7 +36,7 @@ fn get_changer_handle(param: &Value) -> Result<File, Error> {
 
     if let Some(name) = param["changer"].as_str() {
         let (config, _digest) = pbs_config::drive::config()?;
-        let changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;
+        let changer_config: ScsiTapeChanger = config.lookup("changer", name)?;
         eprintln!("using device {}", changer_config.path);
         return sg_pt_changer::open(&changer_config.path);
     }
diff --git a/pbs-tape/src/lib.rs b/pbs-tape/src/lib.rs
index 570314e3..fadcb530 100644
--- a/pbs-tape/src/lib.rs
+++ b/pbs-tape/src/lib.rs
@@ -312,7 +312,7 @@ impl MtxStatus {
         let mut export_slots: HashSet<u64> = HashSet::new();
 
         if let Some(slots) = &config.export_slots {
-            let slots: Value = SLOT_ARRAY_SCHEMA.parse_property_string(&slots)?;
+            let slots: Value = SLOT_ARRAY_SCHEMA.parse_property_string(slots)?;
             export_slots = slots
                 .as_array()
                 .unwrap()
diff --git a/pbs-tape/src/sg_pt_changer.rs b/pbs-tape/src/sg_pt_changer.rs
index b73de930..02c20091 100644
--- a/pbs-tape/src/sg_pt_changer.rs
+++ b/pbs-tape/src/sg_pt_changer.rs
@@ -86,7 +86,7 @@ fn execute_scsi_command(
     let mut timeout = std::time::Duration::new(5, 0); // short timeout by default
 
     loop {
-        match sg_raw.do_command(&cmd) {
+        match sg_raw.do_command(cmd) {
             Ok(data) => return Ok(data.to_vec()),
             Err(err) if !retry => bail!("{} failed: {}", error_prefix, err),
             Err(err) => {
@@ -487,7 +487,7 @@ pub fn status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {
     let mut status = read_element_status(&mut file)
         .map_err(|err| format_err!("error reading element status: {}", err))?;
 
-    status.mark_import_export_slots(&config)?;
+    status.mark_import_export_slots(config)?;
 
     Ok(status)
 }
@@ -827,7 +827,7 @@ mod test {
         element_type: u8,
     ) -> Vec<u8> {
         let descs: Vec<Vec<u8>> = descriptors.iter().map(|desc| {
-            build_storage_descriptor(&desc, trailing)
+            build_storage_descriptor(desc, trailing)
         }).collect();
 
         let (desc_len, address) = if let Some(el) = descs.get(0) {
diff --git a/pbs-tools/src/cert.rs b/pbs-tools/src/cert.rs
index f438f6a3..a30c7996 100644
--- a/pbs-tools/src/cert.rs
+++ b/pbs-tools/src/cert.rs
@@ -46,7 +46,7 @@ impl CertInfo {
     }
 
     pub fn from_pem(cert_pem: &[u8]) -> Result<Self, Error> {
-        let x509 = openssl::x509::X509::from_pem(&cert_pem)?;
+        let x509 = openssl::x509::X509::from_pem(cert_pem)?;
         Ok(Self{
             x509
         })
@@ -87,11 +87,11 @@ impl CertInfo {
     }
 
     pub fn not_before_unix(&self) -> Result<i64, Error> {
-        asn1_time_to_unix(&self.not_before())
+        asn1_time_to_unix(self.not_before())
     }
 
     pub fn not_after_unix(&self) -> Result<i64, Error> {
-        asn1_time_to_unix(&self.not_after())
+        asn1_time_to_unix(self.not_after())
     }
 
     /// Check if the certificate is expired at or after a specific unix epoch.
diff --git a/pbs-tools/src/json.rs b/pbs-tools/src/json.rs
index b3e2940f..1b2ebcd5 100644
--- a/pbs-tools/src/json.rs
+++ b/pbs-tools/src/json.rs
@@ -64,7 +64,7 @@ pub fn json_object_to_query(data: Value) -> Result<String, Error> {
                 query.append_pair(key, &n.to_string());
             }
             Value::String(s) => {
-                query.append_pair(key, &s);
+                query.append_pair(key, s);
             }
             Value::Array(arr) => {
                 for element in arr {
@@ -76,7 +76,7 @@ pub fn json_object_to_query(data: Value) -> Result<String, Error> {
                             query.append_pair(key, &n.to_string());
                         }
                         Value::String(s) => {
-                            query.append_pair(key, &s);
+                            query.append_pair(key, s);
                         }
                         _ => bail!(
                             "json_object_to_query: unable to handle complex array data types."
@@ -121,14 +121,14 @@ pub fn required_integer_property(param: &Value, name: &str) -> Result<i64, Error> {
 pub fn required_array_param<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
     match param[name].as_array() {
-        Some(s) => Ok(&s),
+        Some(s) => Ok(s),
         None => bail!("missing parameter '{}'", name),
     }
 }
 
 pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<&'a [Value], Error> {
     match param[name].as_array() {
-        Some(s) => Ok(&s),
+        Some(s) => Ok(s),
         None => bail!("missing property '{}'", name),
     }
 }
diff --git a/pbs-tools/src/ticket.rs b/pbs-tools/src/ticket.rs
index dd928635..9828d429 100644
--- a/pbs-tools/src/ticket.rs
+++ b/pbs-tools/src/ticket.rs
@@ -97,8 +97,8 @@ where
         write!(
             f,
             "{}:{}:{:08X}",
-            percent_encode(self.prefix.as_bytes(), &TICKET_ASCIISET),
-            percent_encode(self.data.as_bytes(), &TICKET_ASCIISET),
+            percent_encode(self.prefix.as_bytes(), TICKET_ASCIISET),
+            percent_encode(self.data.as_bytes(), TICKET_ASCIISET),
             self.time,
         )
         .map_err(Error::from)
@@ -107,7 +107,7 @@ where
 
     /// Write additional authentication data to the verifier.
 fn write_aad(f: &mut dyn io::Write, aad: Option<&str>) -> Result<(), Error> {
     if let Some(aad) = aad {
-        write!(f, ":{}", percent_encode(aad.as_bytes(), &TICKET_ASCIISET))?;
+        write!(f, ":{}", percent_encode(aad.as_bytes(), TICKET_ASCIISET))?;
     }
     Ok(())
 }
@@ -122,7 +122,7 @@ where
     /// Sign the ticket.
     pub fn sign(&mut self, keypair: &PKey<Private>, aad: Option<&str>) -> Result<String, Error> {
         let mut output = Vec::<u8>::new();
-        let mut signer = Signer::new(MessageDigest::sha256(), &keypair)
+        let mut signer = Signer::new(MessageDigest::sha256(), keypair)
             .map_err(|err| format_err!("openssl error creating signer for ticket: {}", err))?;
 
         self.write_data(&mut output)
@@ -179,14 +179,14 @@ where
             bail!("invalid ticket - expired");
         }
 
-        let mut verifier = Verifier::new(MessageDigest::sha256(), &keypair)?;
+        let mut verifier = Verifier::new(MessageDigest::sha256(), keypair)?;
 
         self.write_data(&mut verifier)
             .and_then(|()| Self::write_aad(&mut verifier, aad))
             .map_err(|err| format_err!("error verifying ticket: {}", err))?;
 
         let is_valid: bool = verifier
-            .verify(&signature)
+            .verify(signature)
             .map_err(|err| format_err!("openssl error verifying ticket: {}", err))?;
 
         if !is_valid {
diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs
index 60b7c2e0..b0c4eaf0 100644
--- a/proxmox-backup-client/src/catalog.rs
+++ b/proxmox-backup-client/src/catalog.rs
@@ -90,8 +90,8 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        &snapshot.group().backup_type(),
-        &snapshot.group().backup_id(),
+        snapshot.group().backup_type(),
+        snapshot.group().backup_id(),
         snapshot.backup_time(),
         true,
     ).await?;
@@ -103,7 +103,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
 
     let most_used = index.find_most_used_chunks(8);
 
-    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
+    let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
     let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
 
@@ -232,7 +232,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
 
     let most_used = index.find_most_used_chunks(8);
 
-    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
+    let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
     let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
     let mut reader = BufferedDynamicReader::new(index, chunk_reader);
     let mut catalogfile = std::fs::OpenOptions::new()
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index 94cffa19..50682637 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -654,7 +654,7 @@ async fn create_backup(
 
     let crypto = crypto_parameters(&param)?;
 
-    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox_sys::nodename());
+    let backup_id = param["backup-id"].as_str().unwrap_or(proxmox_sys::nodename());
 
     let backup_type = param["backup-type"].as_str().unwrap_or("host");
 
@@ -794,7 +794,7 @@ async fn create_backup(
         crypt_config.clone(),
         repo.store(),
         backup_type,
-        &backup_id,
+        backup_id,
         backup_time,
         verbose,
         false
@@ -1003,7 +1003,7 @@ async fn dump_image(
     for pos in 0..index.index_count() {
         let digest = index.index_digest(pos).unwrap();
-        let raw_data = chunk_reader.read_chunk(&digest).await?;
+        let raw_data = chunk_reader.read_chunk(digest).await?;
         writer.write_all(&raw_data)?;
         bytes += raw_data.len();
         if verbose {
diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs
index 34daa935..56410d55 100644
--- a/proxmox-backup-client/src/mount.rs
+++ b/proxmox-backup-client/src/mount.rs
@@ -259,7 +259,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
     let session = pbs_client::pxar::fuse::Session::mount(
         decoder,
-        &options,
+        options,
         false,
         Path::new(target.unwrap()),
     )
diff --git a/proxmox-file-restore/src/block_driver_qemu.rs b/proxmox-file-restore/src/block_driver_qemu.rs
index 25a9b3fe..f36832de 100644
--- a/proxmox-file-restore/src/block_driver_qemu.rs
+++ b/proxmox-file-restore/src/block_driver_qemu.rs
@@ -325,7 +325,7 @@ impl BlockRestoreDriver for QemuBlockDriver {
         match VMStateMap::load_read_only() {
             Ok(state) => state
                 .iter()
-                .filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(&name).ok())
+                .filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(name).ok())
                 .collect(),
             Err(_) => Vec::new(),
         }
diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs
index 055e018a..0ada15e6 100644
--- a/proxmox-file-restore/src/main.rs
+++ b/proxmox-file-restore/src/main.rs
@@ -175,8 +175,8 @@ async fn list(
         client,
         crypt_config.clone(),
         repo.store(),
-        &snapshot.group().backup_type(),
-        &snapshot.group().backup_id(),
+        snapshot.group().backup_type(),
+        snapshot.group().backup_id(),
         snapshot.backup_time(),
         true,
     )
     .await?;
@@ -209,7 +209,7 @@ async fn list(
         .download_dynamic_index(&manifest, CATALOG_NAME)
         .await?;
     let most_used = index.find_most_used_chunks(8);
-    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
+    let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
     let chunk_reader = RemoteChunkReader::new(
         client.clone(),
         crypt_config,
@@ -348,8 +348,8 @@ async fn extract(
         client,
         crypt_config.clone(),
         repo.store(),
-        &snapshot.group().backup_type(),
-        &snapshot.group().backup_id(),
+        snapshot.group().backup_type(),
+        snapshot.group().backup_id(),
         snapshot.backup_time(),
         true,
     )
diff --git a/proxmox-rest-server/src/rest.rs b/proxmox-rest-server/src/rest.rs
index d08b4988..3f846cc6 100644
--- a/proxmox-rest-server/src/rest.rs
+++ b/proxmox-rest-server/src/rest.rs
@@ -241,7 +241,7 @@ fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
         static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
     }
     let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
-    let capture = RE.captures(&forwarded)?;
+    let capture = RE.captures(forwarded)?;
     let rhost = capture.get(1)?.as_str();
 
     rhost.parse().ok()
diff --git a/proxmox-rest-server/src/worker_task.rs b/proxmox-rest-server/src/worker_task.rs
index febd545a..bcd1d815 100644
--- a/proxmox-rest-server/src/worker_task.rs
+++ b/proxmox-rest-server/src/worker_task.rs
@@ -151,7 +151,7 @@ impl WorkerTaskSetup {
             finish_list.sort_unstable_by(|a, b| {
                 match (&a.state, &b.state) {
-                    (Some(s1), Some(s2)) => s1.cmp(&s2),
+                    (Some(s1), Some(s2)) => s1.cmp(s2),
                     (Some(_), None) => std::cmp::Ordering::Less,
                     (None, Some(_)) => std::cmp::Ordering::Greater,
                     _ => a.upid.starttime.cmp(&b.upid.starttime),
@@ -170,7 +170,7 @@ impl WorkerTaskSetup {
                     false,
                 )?;
                 for info in &finish_list {
-                    writer.write_all(render_task_line(&info).as_bytes())?;
+                    writer.write_all(render_task_line(info).as_bytes())?;
                 }
             }
 
@@ -580,7 +580,7 @@ fn render_task_line(info: &TaskListInfo) -> String {
 fn render_task_list(list: &[TaskListInfo]) -> String {
     let mut raw = String::new();
     for info in list {
-        raw.push_str(&render_task_line(&info));
+        raw.push_str(&render_task_line(info));
     }
     raw
 }
@@ -980,7 +980,7 @@ pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
 
 /// Request abort of a local worker (if existing and running)
 pub fn abort_local_worker(upid: UPID) {
-    if let Some(ref worker) = WORKER_TASK_LIST.lock().unwrap().get(&upid.task_id) {
+    if let Some(worker) = WORKER_TASK_LIST.lock().unwrap().get(&upid.task_id) {
         worker.request_abort();
     }
 }
diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
index d80b01d3..081e4576 100644
--- a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
+++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
@@ -146,7 +146,7 @@ fn list(
     let param_path_buf = Path::new(path_str);
 
     let mut disk_state = crate::DISK_STATE.lock().unwrap();
-    let query_result = disk_state.resolve(&param_path_buf)?;
+    let query_result = disk_state.resolve(param_path_buf)?;
 
     match query_result {
         ResolveResult::Path(vm_path) => {
@@ -275,7 +275,7 @@ fn extract(
     let query_result = {
         let mut disk_state = crate::DISK_STATE.lock().unwrap();
-        disk_state.resolve(&path)?
+        disk_state.resolve(path)?
     };
 
     let vm_path = match query_result {
diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs
index b68700b0..a0baf56b 100644
--- a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs
+++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs
@@ -398,7 +398,7 @@ impl DiskState {
             // attempt to mount device directly
             let dev_node = format!("/dev/{}", name);
-            let size = Self::make_dev_node(&dev_node, &sys_path)?;
+            let size = Self::make_dev_node(&dev_node, sys_path)?;
             let mut dfs_bucket = Bucket::RawFs(PartitionBucketData {
                 dev_node: dev_node.clone(),
                 number: 0,
@@ -755,7 +755,7 @@ impl DiskState {
     fn make_dev_node(devnode: &str, sys_path: &str) -> Result<u64, Error> {
         let dev_num_str = fs::file_read_firstline(&format!("{}/dev", sys_path))?;
         let (major, minor) = dev_num_str.split_at(dev_num_str.find(':').unwrap());
-        Self::mknod_blk(&devnode, major.parse()?, minor[1..].trim_end().parse()?)?;
+        Self::mknod_blk(devnode, major.parse()?, minor[1..].trim_end().parse()?)?;
 
         // this *always* contains the number of 512-byte sectors, regardless of the true
         // blocksize of this disk - which should always be 512 here anyway
diff --git a/proxmox-rrd/src/cache.rs b/proxmox-rrd/src/cache.rs
index b786f14f..622a0589 100644
--- a/proxmox-rrd/src/cache.rs
+++ b/proxmox-rrd/src/cache.rs
@@ -393,10 +393,10 @@ fn commit_journal_impl(
     // save all RRDs - we only need a read lock here
     // Note: no fsync here (we do it afterwards)
     for rel_path in files.iter() {
-        let parent_dir = rrd_parent_dir(&config.basedir, &rel_path);
+        let parent_dir = rrd_parent_dir(&config.basedir, rel_path);
         dir_set.insert(parent_dir);
         rrd_file_count += 1;
-        if let Err(err) = rrd_map.read().unwrap().flush_rrd_file(&rel_path) {
+        if let Err(err) = rrd_map.read().unwrap().flush_rrd_file(rel_path) {
             errors += 1;
             log::error!("unable to save rrd {}: {}", rel_path, err);
         }
diff --git a/proxmox-rrd/src/rrd.rs b/proxmox-rrd/src/rrd.rs
index 2aebe1ae..4b48d0cf 100644
--- a/proxmox-rrd/src/rrd.rs
+++ b/proxmox-rrd/src/rrd.rs
@@ -317,7 +317,7 @@ impl RRD {
         }
 
         let rrd = if raw[0..8] == rrd_v1::PROXMOX_RRD_MAGIC_1_0 {
-            let v1 = rrd_v1::RRDv1::from_raw(&raw)?;
+            let v1 = rrd_v1::RRDv1::from_raw(raw)?;
             v1.to_rrd_v2()
                 .map_err(|err| format_err!("unable to convert from old V1 format - {}", err))?
         } else if raw[0..8] == PROXMOX_RRD_MAGIC_2_0 {
diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs
index 5bbd4e6d..545e6440 100644
--- a/pxar-bin/src/main.rs
+++ b/pxar-bin/src/main.rs
@@ -195,7 +195,7 @@ fn extract_archive(
         let mut reader = stdin.lock();
         extract_archive_from_reader(
             &mut reader,
-            &target,
+            target,
             feature_flags,
             verbose,
             options,
@@ -208,7 +208,7 @@ fn extract_archive(
         let mut reader = std::io::BufReader::new(file);
         extract_archive_from_reader(
             &mut reader,
-            &target,
+            target,
             feature_flags,
             verbose,
             options,
@@ -409,7 +409,7 @@ async fn mount_archive(
     let mountpoint = Path::new(&mountpoint);
     let options = OsStr::new("ro,default_permissions");
 
-    let session = fuse::Session::mount_path(&archive, &options, verbose, mountpoint)
+    let session = fuse::Session::mount_path(archive, options, verbose, mountpoint)
         .await
         .map_err(|err| format_err!("pxar mount failed: {}", err))?;
diff --git a/src/acme/client.rs b/src/acme/client.rs
index be07b26f..bc9d4ec4 100644
--- a/src/acme/client.rs
+++ b/src/acme/client.rs
@@ -233,7 +233,7 @@ impl AcmeClient {
             )
             .await?;
 
-            let request = account.post_request(&account.location, &nonce, data)?;
+            let request = account.post_request(&account.location, nonce, data)?;
             match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                 Ok(response) => break response,
                 Err(err) if err.is_bad_nonce() => continue,
@@ -402,7 +402,7 @@ impl AcmeClient {
             )
             .await?;
 
-            let request = revocation.request(&directory, nonce)?;
+            let request = revocation.request(directory, nonce)?;
             match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                 Ok(_response) => return Ok(()),
                 Err(err) if err.is_bad_nonce() => continue,
diff --git a/src/acme/plugin.rs b/src/acme/plugin.rs
index 65eb60d1..a993c8d7 100644
--- a/src/acme/plugin.rs
+++ b/src/acme/plugin.rs
@@ -270,7 +270,7 @@ impl AcmePlugin for StandaloneServer {
         let token = challenge
             .token()
             .ok_or_else(|| format_err!("missing token in challenge"))?;
-        let key_auth = Arc::new(client.key_authorization(&token)?);
+        let key_auth = Arc::new(client.key_authorization(token)?);
         let path = Arc::new(format!("/.well-known/acme-challenge/{}", token));
 
         let service = make_service_fn(move |_| {
diff --git a/src/api2/access/acl.rs b/src/api2/access/acl.rs
index 41810cc3..43d70ee3 100644
--- a/src/api2/access/acl.rs
+++ b/src/api2/access/acl.rs
@@ -121,7 +121,7 @@ pub fn read_acl(
     let mut list: Vec<AclListItem> = Vec::new();
     if let Some(path) = &path {
         if let Some(node) = &tree.find_node(path) {
-            extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
+            extract_acl_node_data(node, path, &mut list, exact, &auth_id_filter);
         }
     } else {
         extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
     }
diff --git a/src/api2/access/mod.rs b/src/api2/access/mod.rs
index dab75f99..8805fc00 100644
--- a/src/api2/access/mod.rs
+++ b/src/api2/access/mod.rs
@@ -118,7 +118,7 @@ fn authenticate_2nd(
     challenge_ticket: &str,
     response: &str,
 ) -> Result {
-    let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(&challenge_ticket)?
+    let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(challenge_ticket)?
         .verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
         .require_partial()?;
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index b653f906..a7a99db7 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -83,7 +83,7 @@ fn check_priv_or_backup_owner(
     required_privs: u64,
 ) -> Result<(), Error> {
     let user_info = CachedUserInfo::new()?;
-    let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
+    let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
 
     if privs & required_privs == 0 {
         let owner = store.get_owner(group)?;
@@ -125,7 +125,7 @@ fn get_all_snapshot_files(
     info: &BackupInfo,
 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
 
-    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
+    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
 
     let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
         acc.insert(item.filename.clone());
@@ -536,7 +536,7 @@ pub fn list_snapshots (
             snapshots.extend(
                 group_backups
                     .into_iter()
-                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
+                    .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
             );
 
             Ok(snapshots)
@@ -549,7 +549,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
     groups.iter()
         .filter(|group| {
-            let owner = match store.get_owner(&group) {
+            let owner = match store.get_owner(group) {
                 Ok(owner) => owner,
                 Err(err) => {
                     eprintln!("Failed to get owner of group '{}/{}' - {}",
@@ -1071,7 +1071,7 @@ pub fn get_datastore_list(
     let mut list = Vec::new();
 
     for (store, (_, data)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
             list.push(
@@ -1401,7 +1401,7 @@ pub fn catalog(
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
 
     let (csum, size) = index.compute_csum();
-    manifest.verify_file(&file_name, &csum, size)?;
+    manifest.verify_file(file_name, &csum, size)?;
 
     let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -1446,7 +1446,7 @@ pub fn pxar_file_download(
     async move {
         let store = required_string_param(&param, "store")?;
-        let datastore = DataStore::lookup_datastore(&store)?;
+        let datastore = DataStore::lookup_datastore(store)?;
 
         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
@@ -1483,7 +1483,7 @@ pub fn pxar_file_download(
             .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
 
         let (csum, size) = index.compute_csum();
-        manifest.verify_file(&pxar_name, &csum, size)?;
+        manifest.verify_file(pxar_name, &csum, size)?;
 
         let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
         let reader = BufferedDynamicReader::new(index, chunk_reader);
diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs
index a5e107e9..c85bdf15 100644
--- a/src/api2/admin/sync.rs
+++ b/src/api2/admin/sync.rs
@@ -73,7 +73,7 @@ pub fn list_sync_jobs(
             }
         })
         .filter(|job: &SyncJobConfig| {
-            check_sync_job_read_access(&user_info, &auth_id, &job)
+            check_sync_job_read_access(&user_info, &auth_id, job)
         });
 
     let mut list = Vec::new();
diff --git a/src/api2/config/access/tfa.rs b/src/api2/config/access/tfa.rs
index a961a2f0..cecf0c9f 100644
--- a/src/api2/config/access/tfa.rs
+++ b/src/api2/config/access/tfa.rs
@@ -95,7 +95,7 @@ pub fn update_webauthn_config(
         let digest = <[u8; 32]>::from_hex(digest)?;
         crate::tools::detect_modified_configuration_file(
             &digest,
-            &crate::config::tfa::webauthn_config_digest(&wa)?,
+            &crate::config::tfa::webauthn_config_digest(wa)?,
         )?;
     }
diff --git a/src/api2/config/acme.rs b/src/api2/config/acme.rs
index e8fab85c..2fe1664f 100644
--- a/src/api2/config/acme.rs
+++ b/src/api2/config/acme.rs
@@ -524,7 +524,7 @@ pub fn list_plugins(mut rpcenv: &mut dyn RpcEnvironment) -> Result
     match plugins.get(&id) {
-        Some((ty, data)) => Ok(modify_cfg_for_api(&id, &ty, &data)),
+        Some((ty, data)) => Ok(modify_cfg_for_api(&id, ty, data)),
         None => http_bail!(NOT_FOUND, "no such plugin"),
     }
 }
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index ada53d68..32983556 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -20,12 +20,12 @@ pub fn check_sync_job_read_access(
     auth_id: &Authid,
     job: &SyncJobConfig,
 ) -> bool {
-    let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+    let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
     if datastore_privs & PRIV_DATASTORE_AUDIT == 0 {
         return false;
     }
 
-    let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote]);
+    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
     remote_privs & PRIV_REMOTE_AUDIT != 0
 }
 
@@ -35,7 +35,7 @@ pub fn check_sync_job_modify_access(
     auth_id: &Authid,
     job: &SyncJobConfig,
 ) -> bool {
-    let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+    let datastore_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store]);
     if datastore_privs & PRIV_DATASTORE_BACKUP == 0 {
         return false;
     }
@@ -62,7 +62,7 @@ pub fn check_sync_job_modify_access(
         return false;
     }
 
-    let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote, &job.remote_store]);
+    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
     remote_privs & PRIV_REMOTE_READ != 0
 }
 
@@ -96,7 +96,7 @@ pub fn list_sync_jobs(
     let list = list
         .into_iter()
-        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, &sync_job))
+        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
         .collect();
     Ok(list)
 }
@@ -429,8 +429,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
     };
 
     // should work without ACLs
-    assert_eq!(check_sync_job_read_access(&user_info, &root_auth_id, &job), true);
-    assert_eq!(check_sync_job_modify_access(&user_info, &root_auth_id, &job), true);
+    assert_eq!(check_sync_job_read_access(&user_info, root_auth_id, &job), true);
+    assert_eq!(check_sync_job_modify_access(&user_info, root_auth_id, &job), true);
 
     // user without permissions must fail
     assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
diff --git a/src/api2/node/certificates.rs b/src/api2/node/certificates.rs
index 4f196327..9508ea38 100644
--- a/src/api2/node/certificates.rs
+++ b/src/api2/node/certificates.rs
@@ -330,7 +330,7 @@ async fn order_certificate(
     for auth_url in &order.data.authorizations {
         task_log!(worker, "Getting authorization details from '{}'", auth_url);
-        let mut auth = acme.get_authorization(&auth_url).await?;
+        let mut auth = acme.get_authorization(auth_url).await?;
 
         let domain = match &mut auth.identifier {
             Identifier::Dns(domain) => domain.to_ascii_lowercase(),
@@ -442,7 +442,7 @@ async fn request_validation(
     validation_url: &str,
 ) -> Result<(), Error> {
     task_log!(worker, "Triggering validation");
-    acme.request_challenge_validation(&validation_url).await?;
+    acme.request_challenge_validation(validation_url).await?;
 
     task_log!(worker, "Sleeping for 5 seconds");
     tokio::time::sleep(Duration::from_secs(5)).await;
 
@@ -450,7 +450,7 @@ async fn request_validation(
     loop {
         use proxmox_acme_rs::authorization::Status;
 
-        let auth = acme.get_authorization(&auth_url).await?;
+        let auth = acme.get_authorization(auth_url).await?;
         match auth.status {
             Status::Pending => {
                 task_log!(worker, "Status is still 'pending', trying again in 10 seconds");
diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index d8e81c52..bf1a1be6 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -282,7 +282,7 @@ fn create_datastore_mount_unit(
     what: &str,
 ) -> Result<String, Error> {
 
-    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(&mount_point, true);
+    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
     mount_unit_name.push_str(".mount");
 
     let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
diff --git a/src/api2/node/dns.rs b/src/api2/node/dns.rs
index 18312e31..8e4d4ada 100644
--- a/src/api2/node/dns.rs
+++ b/src/api2/node/dns.rs
@@ -55,9 +55,9 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
 
     for line in data.lines() {
 
-        if let Some(caps) = DOMAIN_REGEX.captures(&line) {
+        if let Some(caps) = DOMAIN_REGEX.captures(line) {
             result["search"] = Value::from(&caps[1]);
-        } else if let Some(caps) = SERVER_REGEX.captures(&line) {
+        } else if let Some(caps) = SERVER_REGEX.captures(line) {
             nscount += 1;
             if nscount > 3 { continue };
             let nameserver = &caps[1];
diff --git a/src/api2/node/mod.rs b/src/api2/node/mod.rs
index 24d66e58..8c39afc8 100644
--- a/src/api2/node/mod.rs
+++ b/src/api2/node/mod.rs
@@ -121,7 +121,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
     let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?.sign(
         private_auth_key(),
-        Some(&tools::ticket::term_aad(&userid, &path, port)),
+        Some(&tools::ticket::term_aad(userid, path, port)),
     )?;
 
     let mut command = Vec::new();
@@ -161,7 +161,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
     arguments.push(&fd_string);
     arguments.extend_from_slice(&[
         "--path",
-        &path,
+        path,
         "--perm",
         "Sys.Console",
         "--authport",
@@ -293,7 +293,7 @@ fn upgrade_to_websocket(
         Ticket::<Empty>::parse(ticket)?.verify(
             crate::auth_helpers::public_auth_key(),
             ticket::TERM_PREFIX,
-            Some(&tools::ticket::term_aad(&userid, "/system", port)),
+            Some(&tools::ticket::term_aad(userid, "/system", port)),
         )?;
 
         let (ws, response) = WebSocket::new(parts.headers.clone())?;
diff --git a/src/api2/node/network.rs b/src/api2/node/network.rs
index dc6cb5d4..77821fde 100644
--- a/src/api2/node/network.rs
+++ b/src/api2/node/network.rs
@@ -17,7 +17,7 @@ use pbs_config::network::{self, NetworkConfig};
 use proxmox_rest_server::WorkerTask;
 
 fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
-    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(&list)?;
+    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
     Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
 }
diff --git a/src/api2/node/services.rs b/src/api2/node/services.rs
index b6a7f1ae..9f08b85a 100644
--- a/src/api2/node/services.rs
+++ b/src/api2/node/services.rs
@@ -176,9 +176,9 @@ fn get_service_state(
         bail!("unknown service name '{}'", service);
     }
 
-    let status = get_full_service_state(&service)?;
+    let status = get_full_service_state(service)?;
 
-    Ok(json_service_state(&service, status))
+    Ok(json_service_state(service, status))
 }
 
 fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
diff --git a/src/api2/node/tasks.rs b/src/api2/node/tasks.rs
index 72f78bad..07353d0b 100644
--- a/src/api2/node/tasks.rs
+++ b/src/api2/node/tasks.rs
@@ -24,9 +24,9 @@ use pbs_config::CachedUserInfo;
 fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
     match (upid.worker_type.as_str(), &upid.worker_id) {
         ("verificationjob", Some(workerid)) => {
-            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(store) = captures.get(1) {
-                    return user_info.check_privs(&auth_id,
+                    return user_info.check_privs(auth_id,
                                                  &["datastore", store.as_str()],
                                                  PRIV_DATASTORE_VERIFY,
                                                  true);
@@ -34,7 +34,7 @@
             }
         },
         ("syncjob", Some(workerid)) => {
-            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
                 let remote = captures.get(1);
                 let remote_store = captures.get(2);
                 let local_store = captures.get(3);
@@ -42,7 +42,7 @@
                 if let (Some(remote), Some(remote_store), Some(local_store)) =
                     (remote, remote_store, local_store)
                 {
-                    return check_pull_privs(&auth_id,
+                    return check_pull_privs(auth_id,
                                             local_store.as_str(),
                                             remote.as_str(),
                                             remote_store.as_str(),
@@ -51,15 +51,15 @@
             }
         },
         ("garbage_collection", Some(workerid)) => {
-            return user_info.check_privs(&auth_id,
-                                         &["datastore", &workerid],
+            return user_info.check_privs(auth_id,
+                                         &["datastore", workerid],
                                          PRIV_DATASTORE_MODIFY,
                                          true)
         },
         ("prune", Some(workerid)) => {
-            return user_info.check_privs(&auth_id,
+            return user_info.check_privs(auth_id,
                                          &["datastore",
-                                         &workerid],
+                                         workerid],
                                          PRIV_DATASTORE_MODIFY,
                                          true);
         },
@@ -73,7 +73,7 @@
 fn check_job_store(upid: &UPID, store: &str) -> bool {
     match (upid.worker_type.as_str(), &upid.worker_id) {
         (workertype, Some(workerid)) if workertype.starts_with("verif") => {
-            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(jobstore) = captures.get(1) {
                     return store == jobstore.as_str();
                 }
@@ -82,7 +82,7 @@
             }
         }
         ("syncjob", Some(workerid)) => {
-            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(&workerid) {
+            if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(local_store) = captures.get(3) {
                     return store == local_store.as_str();
                 }
@@ -112,7 +112,7 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
             // or task == job which the user/token could have configured/manually executed
 
             user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
-                .or_else(|_| check_job_privs(&auth_id, &user_info, upid))
+                .or_else(|_| check_job_privs(auth_id, &user_info, upid))
                 .or_else(|_| bail!("task access not allowed"))
         }
     }
 }
@@ -250,7 +250,7 @@ async fn get_task_status(
 
 fn extract_upid(param: &Value) -> Result<UPID, Error> {
-    let upid_str = pbs_tools::json::required_string_param(&param, "upid")?;
+    let upid_str = pbs_tools::json::required_string_param(param, "upid")?;
 
     upid_str.parse::<UPID>()
 }
@@ -569,7 +569,7 @@ const UPID_API_SUBDIRS: SubdirMap = &sorted!([
 pub const UPID_API_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
     .delete(&API_METHOD_STOP_TASK)
-    .subdirs(&UPID_API_SUBDIRS);
+    .subdirs(UPID_API_SUBDIRS);
 
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_TASKS)
diff --git a/src/api2/status.rs b/src/api2/status.rs
index 7f50914b..029529ac 100644
--- a/src/api2/status.rs
+++ b/src/api2/status.rs
@@ -91,13 +91,13 @@ pub fn datastore_status(
     let mut list = Vec::new();
 
     for (store, (_, _)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if !allowed {
             continue;
         }
 
-        let datastore = match DataStore::lookup_datastore(&store) {
+        let datastore = match DataStore::lookup_datastore(store) {
             Ok(datastore) => datastore,
             Err(err) => {
                 list.push(json!({
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index dc15908f..99a717d4 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -182,7 +182,7 @@ pub fn do_tape_backup_job(
         Some(lock_tape_device(&drive_config, &setup.drive)?)
     };
 
-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);
 
     let upid_str = WorkerTask::new_thread(
@@ -363,7 +363,7 @@ pub fn backup(
 
     let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);
 
-    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
     let email = lookup_user_email(notify_user);
 
     let upid_str = WorkerTask::new_thread(
@@ -423,7 +423,7 @@ fn backup_worker(
         task_log!(worker, "update media online status");
         let changer_name = update_media_online_status(&setup.drive)?;
 
-        let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
+        let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;
 
         let mut pool_writer = PoolWriter::new(
             pool,
@@ -443,7 +443,7 @@ fn backup_worker(
     };
 
     let group_count_full = group_list.len();
-    let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, &group_filters)).collect();
+    let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, group_filters)).collect();
     let group_count = list.len();
     task_log!(worker, "found {} groups (out of {} total)", group_count, group_count_full);
     (list, group_count)
diff --git a/src/api2/tape/changer.rs b/src/api2/tape/changer.rs
index 6a99ca37..d2ac65f6 100644
--- a/src/api2/tape/changer.rs
+++ b/src/api2/tape/changer.rs
@@ -96,7 +96,7 @@ pub async fn get_status(
     for (id, drive_status) in status.drives.iter().enumerate() {
         let mut state = None;
         if let Some(drive) = drive_map.get(&(id as u64)) {
-            state = get_tape_device_state(&config, &drive)?;
+            state = get_tape_device_state(&config, drive)?;
         }
         let entry = MtxStatusEntry {
             entry_kind: MtxEntryKind::Drive,
@@ -231,7 +231,7 @@ const SUBDIRS: SubdirMap = &[
 const ITEM_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
-    .subdirs(&SUBDIRS);
+    .subdirs(SUBDIRS);
 
diff --git a/src/api2/tape/drive.rs b/src/api2/tape/drive.rs
index 265737f5..526743f6 100644
--- a/src/api2/tape/drive.rs
+++ b/src/api2/tape/drive.rs
@@ -542,7 +542,7 @@ fn write_media_label(
     let media_id = if let Some(ref pool) = pool {
         // assign media to pool by writing special media set label
         task_log!(worker, "Label media '{}' for pool '{}'", label.label_text, pool);
-        let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);
+        let set = MediaSetLabel::with_data(pool, [0u8; 16].into(), 0, label.ctime, None);

         drive.write_media_set_label(&set, None)?;

@@ -1473,7 +1473,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([

 const ITEM_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
-    .subdirs(&SUBDIRS);
+    .subdirs(SUBDIRS);

 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_DRIVES)
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index 42fde2b0..918f034b 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -138,7 +138,7 @@ fn check_datastore_privs(
     auth_id: &Authid,
     owner: &Option<Authid>,
 ) -> Result<(), Error> {
-    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
     if (privs & PRIV_DATASTORE_BACKUP) == 0 {
         bail!("no permissions on /datastore/{}", store);
     }
@@ -220,7 +220,7 @@ pub fn restore(
     }

     for store in used_datastores.iter() {
-        check_datastore_privs(&user_info, &store, &auth_id, &owner)?;
+        check_datastore_privs(&user_info, store, &auth_id, &owner)?;
     }

     let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
@@ -448,7 +448,7 @@ fn restore_list_worker(
         })?;

         let (owner, _group_lock) =
-            datastore.create_locked_backup_group(backup_dir.group(), &restore_owner)?;
+            datastore.create_locked_backup_group(backup_dir.group(), restore_owner)?;
         if restore_owner != &owner {
             // only the owner is allowed to create additional snapshots
             bail!(
@@ -460,7 +460,7 @@ fn restore_list_worker(
         }

         let (media_id, file_num) = if let Some((media_uuid, file_num)) =
-            catalog.lookup_snapshot(&source_datastore, &snapshot)
+            catalog.lookup_snapshot(source_datastore, snapshot)
         {
             let media_id = inventory.lookup_media(media_uuid).unwrap();
             (media_id, file_num)
@@ -516,7 +516,7 @@ fn restore_list_worker(
             let (drive, info) = request_and_load_media(
                 &worker,
                 &drive_config,
-                &drive_name,
+                drive_name,
                 &media_id.label,
                 &email,
             )?;
@@ -568,7 +568,7 @@ fn restore_list_worker(
             let (mut drive, _info) = request_and_load_media(
                 &worker,
                 &drive_config,
-                &drive_name,
+                drive_name,
                 &media_id.label,
                 &email,
             )?;
@@ -591,7 +591,7 @@ fn restore_list_worker(
             let backup_dir: BackupDir = snapshot.parse()?;

             let datastore = store_map
-                .get_datastore(&source_datastore)
+                .get_datastore(source_datastore)
                 .ok_or_else(|| format_err!("unexpected source datastore: {}", source_datastore))?;

             let mut tmp_path = base_path.clone();
@@ -646,7 +646,7 @@ fn get_media_set_catalog(
             }
             Some(media_uuid) => {
                 let media_id = inventory.lookup_media(media_uuid).unwrap();
-                let media_catalog = MediaCatalog::open(status_path, &media_id, false, false)?;
+                let media_catalog = MediaCatalog::open(status_path, media_id, false, false)?;
                 catalog.append_catalog(media_catalog)?;
             }
         }
@@ -899,7 +899,7 @@ pub fn request_and_restore_media(
         Some(ref set) => &set.uuid,
     };

-    let (mut drive, info) = request_and_load_media(&worker, &drive_config, &drive_name, &media_id.label, email)?;
+    let (mut drive, info) = request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?;

     match info.media_set_label {
         None => {
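The `write_media_label` hunk above shows a related variant: a `ref` pattern already binds a reference, so `&pool` inside the `if let` produced a `&&String`. A minimal sketch (the stand-in `with_data` plays the role of `MediaSetLabel::with_data`, assumed here to take the pool name as `&str`):

// A `ref` pattern already binds a reference, so borrowing it again
// just adds a layer for deref coercion to strip off.
fn with_data(pool: &str) -> String {
    format!("media set for pool {}", pool)
}

fn main() {
    let pool: Option<String> = Some("tape-pool".to_string());

    if let Some(ref pool) = pool {
        // `pool` is a `&String` here; `&pool` would be a `&&String`.
        // Both coerce to `&str` at the call, but only the direct form
        // avoids the needless borrow.
        let _ = with_data(pool);
    }
}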
@@ -923,7 +923,7 @@ pub fn request_and_restore_media(
         worker,
         &mut drive,
         &info,
-        Some((&store_map, restore_owner)),
+        Some((store_map, restore_owner)),
         checked_chunks_map,
         false,
     )
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 6e8cfc1f..e37c63be 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -301,7 +301,7 @@ pub fn verify_backup_dir(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
     let snap_lock = lock_dir_noblock_shared(
-        &verify_worker.datastore.snapshot_path(&backup_dir),
+        &verify_worker.datastore.snapshot_path(backup_dir),
         "snapshot",
         "locked by another operation",
     );
@@ -330,7 +330,7 @@ pub fn verify_backup_dir_with_lock(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
     _snap_lock: Dir,
 ) -> Result<bool, Error> {
-    let manifest = match verify_worker.datastore.load_manifest(&backup_dir) {
+    let manifest = match verify_worker.datastore.load_manifest(backup_dir) {
         Ok((manifest, _)) => manifest,
         Err(err) => {
             task_log!(
@@ -365,10 +365,10 @@ pub fn verify_backup_dir_with_lock(
         let result = proxmox_lang::try_block!({
             task_log!(verify_worker.worker, "  check {}", info.filename);
             match archive_type(&info.filename)? {
-                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
-                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, &backup_dir, info),
+                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
+                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
                 ArchiveType::Blob => {
-                    verify_blob(verify_worker.datastore.clone(), &backup_dir, info)
+                    verify_blob(verify_worker.datastore.clone(), backup_dir, info)
                 }
             }
         });
@@ -397,7 +397,7 @@ pub fn verify_backup_dir_with_lock(
             let verify_state = serde_json::to_value(verify_state)?;
             verify_worker
                 .datastore
-                .update_manifest(&backup_dir, |manifest| {
+                .update_manifest(backup_dir, |manifest| {
                     manifest.unprotected["verify_state"] = verify_state;
                 })
                 .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
diff --git a/src/bin/docgen.rs b/src/bin/docgen.rs
index 4be4f1f2..a71c9e8b 100644
--- a/src/bin/docgen.rs
+++ b/src/bin/docgen.rs
@@ -270,7 +270,7 @@ fn dump_api_method_schema(

     data["parameters"] = dump_property_schema(&api_method.parameters);

-    let mut returns = dump_schema(&api_method.returns.schema);
+    let mut returns = dump_schema(api_method.returns.schema);

     if api_method.returns.optional {
         returns["optional"] = 1.into();
     }
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 5e5babd1..287cff0a 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -730,7 +730,7 @@ async fn schedule_datastore_verify_jobs() {
         let worker_type = "verificationjob";
         let auth_id = Authid::root_auth_id().clone();
         if check_schedule(worker_type, &event_str, &job_id) {
-            let job = match Job::new(&worker_type, &job_id) {
+            let job = match Job::new(worker_type, &job_id) {
                 Ok(job) => job,
                 Err(_) => continue, // could not get lock
             };
@@ -766,7 +766,7 @@ async fn schedule_tape_backup_jobs() {
         let worker_type = "tape-backup-job";
         let auth_id = Authid::root_auth_id().clone();
         if check_schedule(worker_type, &event_str, &job_id) {
-            let job = match Job::new(&worker_type, &job_id) {
+            let job = match Job::new(worker_type, &job_id) {
                 Ok(job) => job,
                 Err(_) => continue, // could not get lock
             };
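The docgen change above works for the same reason with a struct field in the middle: `returns.schema` already stores a reference, so borrowing the field produced a `&&Schema`. A sketch with stand-in types (the real `proxmox_schema::ReturnType` may differ in detail; only the reference-valued field matters here):

// Stand-ins for the proxmox_schema types: the point is that `schema`
// is itself a `&'static Schema`, so `&ret.schema` would be `&&Schema`.
struct Schema {
    description: &'static str,
}

struct ReturnType {
    optional: bool,
    schema: &'static Schema,
}

fn dump_schema(schema: &Schema) -> String {
    schema.description.to_string()
}

static OUTPUT_SCHEMA: Schema = Schema { description: "task output" };

fn main() {
    let ret = ReturnType { optional: false, schema: &OUTPUT_SCHEMA };

    // The field is already a reference; pass it through unchanged.
    let _ = dump_schema(ret.schema);
    // `&ret.schema` also compiles via deref coercion, which is exactly
    // what clippy::needless_borrow points out.
    let _ = dump_schema(&ret.schema);
    let _ = ret.optional;
}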
@@ -1033,7 +1033,7 @@ fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool {
         }
     };

-    let last = match jobstate::last_run_time(worker_type, &id) {
+    let last = match jobstate::last_run_time(worker_type, id) {
         Ok(time) => time,
         Err(err) => {
             eprintln!("could not get last run time of {} {}: {}", worker_type, id, err);
diff --git a/src/bin/proxmox_backup_debug/api.rs b/src/bin/proxmox_backup_debug/api.rs
index 459d1d4f..dd8c0e6b 100644
--- a/src/bin/proxmox_backup_debug/api.rs
+++ b/src/bin/proxmox_backup_debug/api.rs
@@ -94,7 +94,7 @@ async fn get_child_links(
     path: &str,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<String>, Error> {
-    let (path, components) = normalize_uri_path(&path)?;
+    let (path, components) = normalize_uri_path(path)?;

     let info = &proxmox_backup::api2::ROUTER
         .find_route(&components, &mut HashMap::new())
@@ -132,7 +132,7 @@ fn get_api_method(
         _ => unreachable!(),
     };
     let mut uri_param = HashMap::new();
-    let (path, components) = normalize_uri_path(&path)?;
+    let (path, components) = normalize_uri_path(path)?;
     if let Some(method) =
         &proxmox_backup::api2::ROUTER.find_method(&components, method.clone(), &mut uri_param)
     {
@@ -446,7 +446,7 @@ async fn ls(path: Option<String>, mut param: Value, rpcenv: &mut dyn RpcEnvironm
         &mut serde_json::to_value(res)?,
         &proxmox_schema::ReturnType {
             optional: false,
-            schema: &LS_SCHEMA,
+            schema: LS_SCHEMA,
         },
         &output_format,
         &options,
diff --git a/src/bin/proxmox_backup_debug/inspect.rs b/src/bin/proxmox_backup_debug/inspect.rs
index 9a1d0ac0..953dc738 100644
--- a/src/bin/proxmox_backup_debug/inspect.rs
+++ b/src/bin/proxmox_backup_debug/inspect.rs
@@ -51,7 +51,7 @@ fn decode_blob(

     if blob.is_encrypted() && key_file.is_some() {
         let (key, _created, _fingerprint) =
-            load_and_decrypt_key(&key_file.unwrap(), &get_encryption_key_password)?;
+            load_and_decrypt_key(key_file.unwrap(), &get_encryption_key_password)?;
         crypt_conf = CryptConfig::new(key)?;
         crypt_conf_opt = Some(&crypt_conf);
     }
diff --git a/src/bin/proxmox_backup_debug/recover.rs b/src/bin/proxmox_backup_debug/recover.rs
index ff3b1377..0c062e38 100644
--- a/src/bin/proxmox_backup_debug/recover.rs
+++ b/src/bin/proxmox_backup_debug/recover.rs
@@ -72,7 +72,7 @@ fn recover_index(

     let crypt_conf_opt = if let Some(key_file_path) = key_file_path {
         let (key, _created, _fingerprint) =
-            load_and_decrypt_key(&key_file_path, &get_encryption_key_password)?;
+            load_and_decrypt_key(key_file_path, &get_encryption_key_password)?;
         Some(CryptConfig::new(key)?)
     } else {
         None
diff --git a/src/bin/proxmox_backup_manager/acl.rs b/src/bin/proxmox_backup_manager/acl.rs
index 8180631b..f68fea1a 100644
--- a/src/bin/proxmox_backup_manager/acl.rs
+++ b/src/bin/proxmox_backup_manager/acl.rs
@@ -55,7 +55,7 @@ fn list_acls(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

 pub fn acl_commands() -> CommandLineInterface {
     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_ACLS))
+        .insert("list", CliCommand::new(&API_METHOD_LIST_ACLS))
         .insert(
             "update",
             CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs
index 4409be9f..f1fd02ca 100644
--- a/src/bin/proxmox_backup_manager/datastore.rs
+++ b/src/bin/proxmox_backup_manager/datastore.rs
@@ -93,7 +93,7 @@ async fn create_datastore(mut param: Value) -> Result<Value, Error> {

     let mut client = connect_to_localhost()?;

-    let result = client.post(&"api2/json/config/datastore", Some(param)).await?;
+    let result = client.post("api2/json/config/datastore", Some(param)).await?;

     view_task_result(&mut client, result, &output_format).await?;

diff --git a/src/bin/proxmox_backup_manager/openid.rs b/src/bin/proxmox_backup_manager/openid.rs
index 1a746457..8390eba7 100644
--- a/src/bin/proxmox_backup_manager/openid.rs
+++ b/src/bin/proxmox_backup_manager/openid.rs
@@ -73,8 +73,8 @@ fn show_openid_realm(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

 pub fn openid_commands() -> CommandLineInterface {
     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_OPENID_REALMS))
-        .insert("show", CliCommand::new(&&API_METHOD_SHOW_OPENID_REALM)
+        .insert("list", CliCommand::new(&API_METHOD_LIST_OPENID_REALMS))
+        .insert("show", CliCommand::new(&API_METHOD_SHOW_OPENID_REALM)
             .arg_param(&["realm"])
             .completion_cb("realm", pbs_config::domains::complete_openid_realm_name)
         )
diff --git a/src/bin/proxmox_backup_manager/remote.rs b/src/bin/proxmox_backup_manager/remote.rs
index c83ec1e2..bc47d421 100644
--- a/src/bin/proxmox_backup_manager/remote.rs
+++ b/src/bin/proxmox_backup_manager/remote.rs
@@ -75,7 +75,7 @@ fn show_remote(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

 pub fn remote_commands() -> CommandLineInterface {
     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
+        .insert("list", CliCommand::new(&API_METHOD_LIST_REMOTES))
         .insert(
             "show",
             CliCommand::new(&API_METHOD_SHOW_REMOTE)
diff --git a/src/bin/proxmox_backup_manager/traffic_control.rs b/src/bin/proxmox_backup_manager/traffic_control.rs
index 83d3dfff..e8b64b3a 100644
--- a/src/bin/proxmox_backup_manager/traffic_control.rs
+++ b/src/bin/proxmox_backup_manager/traffic_control.rs
@@ -94,7 +94,7 @@ async fn show_current_traffic(param: Value) -> Result<Value, Error> {

     let client = connect_to_localhost()?;

-    let mut result = client.get(&"api2/json/admin/traffic-control", None).await?;
+    let mut result = client.get("api2/json/admin/traffic-control", None).await?;

     let mut data = result["data"].take();

diff --git a/src/bin/proxmox_backup_manager/user.rs b/src/bin/proxmox_backup_manager/user.rs
index 950601d7..43c23d52 100644
--- a/src/bin/proxmox_backup_manager/user.rs
+++ b/src/bin/proxmox_backup_manager/user.rs
@@ -171,7 +171,7 @@ fn list_permissions(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

 pub fn user_commands() -> CommandLineInterface {
     let cmd_def = CliCommandMap::new()
-        .insert("list", CliCommand::new(&&API_METHOD_LIST_USERS))
+        .insert("list", CliCommand::new(&API_METHOD_LIST_USERS))
         .insert(
             "create",
             // fixme: howto handle password parameter?
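The manager CLI hunks all drop a doubled `&&`: the `API_METHOD_*` statics must be passed by reference, and a second `&` had crept in. Because `&&T` auto-derefs to `&T` at the call site, the typo compiled cleanly. A sketch of the mechanism (the `ApiMethod`/`CliCommand` here are simplified stand-ins for the proxmox-router types):

struct ApiMethod {
    description: &'static str,
}

struct CliCommand {
    method: &'static ApiMethod,
}

impl CliCommand {
    fn new(method: &'static ApiMethod) -> Self {
        CliCommand { method }
    }
}

static API_METHOD_LIST_USERS: ApiMethod = ApiMethod { description: "list users" };

fn main() {
    // One `&` is what the signature asks for ...
    let cmd = CliCommand::new(&API_METHOD_LIST_USERS);
    // ... while `&&API_METHOD_LIST_USERS` is a `&&ApiMethod` that the
    // compiler quietly collapses, so it compiled but trips clippy.
    let cmd2 = CliCommand::new(&&API_METHOD_LIST_USERS);
    let _ = (cmd.method.description, cmd2.method.description);
}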
@@ -192,7 +192,7 @@ pub fn user_commands() -> CommandLineInterface {
         )
         .insert(
             "list-tokens",
-            CliCommand::new(&&API_METHOD_LIST_TOKENS)
+            CliCommand::new(&API_METHOD_LIST_TOKENS)
                 .arg_param(&["userid"])
                 .completion_cb("userid", pbs_config::user::complete_userid)
         )
@@ -211,7 +211,7 @@ pub fn user_commands() -> CommandLineInterface {
         )
         .insert(
             "permissions",
-            CliCommand::new(&&API_METHOD_LIST_PERMISSIONS)
+            CliCommand::new(&API_METHOD_LIST_PERMISSIONS)
                 .arg_param(&["auth-id"])
                 .completion_cb("auth-id", pbs_config::user::complete_authid)
                 .completion_cb("path", pbs_config::datastore::complete_acl_path)
diff --git a/src/bin/sg-tape-cmd.rs b/src/bin/sg-tape-cmd.rs
index 10966d90..7edfe5c5 100644
--- a/src/bin/sg-tape-cmd.rs
+++ b/src/bin/sg-tape-cmd.rs
@@ -34,12 +34,12 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

     let handle = if let Some(name) = param["drive"].as_str() {
         let (config, _digest) = pbs_config::drive::config()?;
-        let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+        let drive: LtoTapeDrive = config.lookup("lto", name)?;
         eprintln!("using device {}", drive.path);
         open_lto_tape_drive(&drive)?
     } else if let Some(device) = param["device"].as_str() {
         eprintln!("using device {}", device);
-        LtoTapeHandle::new(open_lto_tape_device(&device)?)?
+        LtoTapeHandle::new(open_lto_tape_device(device)?)?
     } else if let Some(true) = param["stdin"].as_bool() {
         eprintln!("using stdin");
         let fd = std::io::stdin().as_raw_fd();
@@ -62,7 +62,7 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {

         if drive_names.len() == 1 {
             let name = drive_names[0];
-            let drive: LtoTapeDrive = config.lookup("lto", &name)?;
+            let drive: LtoTapeDrive = config.lookup("lto", name)?;
             eprintln!("using device {}", drive.path);
             open_lto_tape_drive(&drive)?
         } else {
diff --git a/src/config/mod.rs b/src/config/mod.rs
index 77d6170a..ae9ad03e 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -185,7 +185,7 @@ pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(
     create_configdir()?;
     pbs_config::replace_backup_config(&key_path, key_pem)
         .map_err(|err| format_err!("error writing certificate private key - {}", err))?;
-    pbs_config::replace_backup_config(&cert_path, &cert_pem)
+    pbs_config::replace_backup_config(&cert_path, cert_pem)
         .map_err(|err| format_err!("error writing certificate file - {}", err))?;

     Ok(())
diff --git a/src/config/node.rs b/src/config/node.rs
index ebbd08bd..1c3ef492 100644
--- a/src/config/node.rs
+++ b/src/config/node.rs
@@ -141,7 +141,7 @@ impl NodeConfig {
     /// Returns the parsed ProxyConfig
     pub fn http_proxy(&self) -> Option<ProxyConfig> {
         if let Some(http_proxy) = &self.http_proxy {
-            match ProxyConfig::parse_proxy_url(&http_proxy) {
+            match ProxyConfig::parse_proxy_url(http_proxy) {
                 Ok(proxy) => Some(proxy),
                 Err(_) => None,
             }
diff --git a/src/server/auth.rs b/src/server/auth.rs
index 33d655d4..6a305690 100644
--- a/src/server/auth.rs
+++ b/src/server/auth.rs
@@ -78,7 +78,7 @@ pub async fn check_pbs_auth(
             verify_csrf_prevention_token(
                 csrf_secret(),
                 &userid,
-                &csrf_token,
+                csrf_token,
                 -300,
                 ticket_lifetime,
             )?;
diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs
index cf48e615..33b106b4 100644
--- a/src/server/email_notifications.rs
+++ b/src/server/email_notifications.rs
@@ -245,8 +245,8 @@ fn send_job_status_mail(

     sendmail(
         &[email],
-        &subject,
-        Some(&text),
+        subject,
+        Some(text),
         Some(&html),
         None,
         Some(&author),
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 8f964009..8b52e8c2 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -438,7 +438,7 @@ async fn pull_snapshot(
                 &mut chunk_reader,
                 tgt_store.clone(),
                 snapshot,
-                &item,
+                item,
                 downloaded_chunks.clone(),
             )
             .await?;
@@ -465,7 +465,7 @@ pub async fn pull_snapshot_from(
     snapshot: &BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
 ) -> Result<(), Error> {
-    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;
+    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(snapshot)?;

     if is_new {
         task_log!(worker, "sync snapshot {:?}", snapshot.relative_path());
@@ -474,12 +474,12 @@ pub async fn pull_snapshot_from(
             worker,
             reader,
             tgt_store.clone(),
-            &snapshot,
+            snapshot,
             downloaded_chunks,
         )
         .await
         {
-            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
+            if let Err(cleanup_err) = tgt_store.remove_backup_dir(snapshot, true) {
                 task_log!(worker, "cleanup error - {}", cleanup_err);
             }
             return Err(err);
@@ -491,7 +491,7 @@ pub async fn pull_snapshot_from(
             worker,
             reader,
             tgt_store.clone(),
-            &snapshot,
+            snapshot,
             downloaded_chunks,
         )
         .await?;
@@ -713,7 +713,7 @@ pub async fn pull_store(
         let list: Vec<BackupGroup> = list
             .into_iter()
             .filter(|group| {
-                apply_filters(&group, group_filter)
+                apply_filters(group, group_filter)
             })
             .collect();
         task_log!(worker, "found {} groups to sync (out of {} total)", list.len(), unfiltered_count);
diff --git a/src/tape/changer/mod.rs b/src/tape/changer/mod.rs
index 6b2477d2..a6bc7aa8 100644
--- a/src/tape/changer/mod.rs
+++ b/src/tape/changer/mod.rs
@@ -265,9 +265,9 @@ impl ScsiMediaChange for ScsiTapeChanger {
         }

         let status = if USE_MTX {
-            mtx::mtx_status(&self)
+            mtx::mtx_status(self)
         } else {
-            sg_pt_changer::status(&self)
+            sg_pt_changer::status(self)
         };

         match &status {
diff --git a/src/tape/changer/mtx/mtx_wrapper.rs b/src/tape/changer/mtx/mtx_wrapper.rs
index 796b782f..5dbe4fa3 100644
--- a/src/tape/changer/mtx/mtx_wrapper.rs
+++ b/src/tape/changer/mtx/mtx_wrapper.rs
@@ -21,7 +21,7 @@ pub fn mtx_status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {

     let mut status = parse_mtx_status(&output)?;

-    status.mark_import_export_slots(&config)?;
+    status.mark_import_export_slots(config)?;

     Ok(status)
 }
diff --git a/src/tape/changer/mtx/parse_mtx_status.rs b/src/tape/changer/mtx/parse_mtx_status.rs
index 9f5fa02b..389c744d 100644
--- a/src/tape/changer/mtx/parse_mtx_status.rs
+++ b/src/tape/changer/mtx/parse_mtx_status.rs
@@ -203,7 +203,7 @@ Data Transfer Element 1:Empty
 Storage Element 24 IMPORT/EXPORT:Empty
 "###;

-        let _ = parse_mtx_status(&output)?;
+        let _ = parse_mtx_status(output)?;

         Ok(())
     }
diff --git a/src/tape/changer/online_status_map.rs b/src/tape/changer/online_status_map.rs
index e86181b3..3284e0e1 100644
--- a/src/tape/changer/online_status_map.rs
+++ b/src/tape/changer/online_status_map.rs
@@ -192,11 +192,11 @@ pub fn update_changer_online_status(
     let mut online_map = OnlineStatusMap::new(drive_config)?;
     let mut online_set = HashSet::new();
     for label_text in label_text_list.iter() {
-        if let Some(media_id) = inventory.find_media_by_label_text(&label_text) {
+        if let Some(media_id) = inventory.find_media_by_label_text(label_text) {
             online_set.insert(media_id.label.uuid.clone());
         }
     }
-    online_map.update_online_status(&changer_name, online_set)?;
+    online_map.update_online_status(changer_name, online_set)?;
     inventory.update_online_status(&online_map)?;

     Ok(())
diff --git a/src/tape/inventory.rs b/src/tape/inventory.rs
index c76e50cd..229e8b8d 100644
--- a/src/tape/inventory.rs
+++ b/src/tape/inventory.rs
@@ -827,7 +827,7 @@ pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
         None => return Vec::new(),
     };
     let status_path = Path::new(TAPE_STATUS_DIR);
-    let inventory = match Inventory::load(&status_path) {
+    let inventory = match Inventory::load(status_path) {
         Ok(inventory) => inventory,
         Err(_) => return Vec::new(),
     };
diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs
index 7ec622b8..31672f02 100644
--- a/src/tape/media_catalog.rs
+++ b/src/tape/media_catalog.rs
@@ -413,7 +413,7 @@ impl MediaCatalog {

         let uuid = &media_id.label.uuid;

-        let me = Self::create_temporary_database(base_path, &media_id, log_to_stdout)?;
+        let me = Self::create_temporary_database(base_path, media_id, log_to_stdout)?;

         Self::finish_temporary_database(base_path, uuid, true)?;

diff --git a/src/tape/media_pool.rs b/src/tape/media_pool.rs
index 6a947b22..6c2a21b7 100644
--- a/src/tape/media_pool.rs
+++ b/src/tape/media_pool.rs
@@ -289,7 +289,7 @@ impl MediaPool {
                 create_new_set = Some(String::from("policy is AlwaysCreate"));
             }
             MediaSetPolicy::CreateAt(event) => {
-                if let Some(set_start_time) = self.inventory.media_set_start_time(&self.current_media_set.uuid()) {
+                if let Some(set_start_time) = self.inventory.media_set_start_time(self.current_media_set.uuid()) {
                     if let Ok(Some(alloc_time)) = event.compute_next_event(set_start_time as i64) {
                         if current_time >= alloc_time {
                             create_new_set = Some(String::from("policy CreateAt event triggered"));
@@ -407,7 +407,7 @@ impl MediaPool {

         for media_id in media_list {

-            let (status, location) = self.compute_media_state(&media_id);
+            let (status, location) = self.compute_media_state(media_id);
             if media_id.media_set_label.is_some() { continue; } // should not happen

             if !self.location_is_available(&location) {
@@ -478,7 +478,7 @@ impl MediaPool {
                 continue;
             }

-            if !self.media_is_expired(&media, current_time) {
+            if !self.media_is_expired(media, current_time) {
                 continue;
             }

diff --git a/src/tape/pool_writer/catalog_set.rs b/src/tape/pool_writer/catalog_set.rs
index 7f42703d..ca488835 100644
--- a/src/tape/pool_writer/catalog_set.rs
+++ b/src/tape/pool_writer/catalog_set.rs
@@ -63,7 +63,7 @@ impl CatalogSet {
         }

         // remove read-only version from set (in case it is there)
-        self.media_set_catalog.remove_catalog(&new_catalog.uuid());
+        self.media_set_catalog.remove_catalog(new_catalog.uuid());

         self.catalog = Some(new_catalog);

diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs
index 5d10e16b..29860b53 100644
--- a/src/tape/pool_writer/mod.rs
+++ b/src/tape/pool_writer/mod.rs
@@ -117,7 +117,7 @@ impl PoolWriter {

     /// Set media status to FULL (persistent - stores pool status)
     pub fn set_media_status_full(&mut self, uuid: &Uuid) -> Result<(), Error> {
-        self.pool.set_media_status_full(&uuid)?;
+        self.pool.set_media_status_full(uuid)?;
         Ok(())
     }

@@ -556,7 +556,7 @@ fn write_chunk_archive<'a>(

         //println!("CHUNK {} size {}", hex::encode(digest), blob.raw_size());

-        match writer.try_write_chunk(&digest, &blob) {
+        match writer.try_write_chunk(digest, blob) {
             Ok(true) => {
                 chunk_list.push(*digest);
                 chunk_iter.next(); // consume
@@ -627,7 +627,7 @@ fn update_media_set_label(
             if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint {
                 bail!("detected changed encryption fingerprint - internal error");
             }
-            media_catalog = MediaCatalog::open(status_path, &media_id, true, false)?;
+            media_catalog = MediaCatalog::open(status_path, media_id, true, false)?;

             // todo: verify last content/media_catalog somehow?
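Several of the tape fixes above stem from iteration: looping over a slice, or over the result of `.iter()`, yields `&T` items, so borrowing the loop variable again gave the usual `&&T`. A minimal sketch of the `MediaPool`-style case (types are stand-ins for the real `MediaId`/`compute_media_state`):

struct MediaId {
    label_text: String,
}

fn compute_media_state(media_id: &MediaId) -> usize {
    media_id.label_text.len()
}

fn main() {
    let media_list = vec![
        MediaId { label_text: "tape-001".into() },
        MediaId { label_text: "tape-002".into() },
    ];

    for media_id in &media_list {
        // `media_id` is already a `&MediaId`; `&media_id` would be the
        // `&&MediaId` form that clippy::needless_borrow rejects.
        let _ = compute_media_state(media_id);
    }
}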
diff --git a/src/tape/pool_writer/new_chunks_iterator.rs b/src/tape/pool_writer/new_chunks_iterator.rs
index 8e759a2f..381b51d3 100644
--- a/src/tape/pool_writer/new_chunks_iterator.rs
+++ b/src/tape/pool_writer/new_chunks_iterator.rs
@@ -53,7 +53,7 @@ impl NewChunksIterator {
                     continue;
                 }

-                if catalog_set.lock().unwrap().contains_chunk(&datastore_name, &digest) {
+                if catalog_set.lock().unwrap().contains_chunk(datastore_name, &digest) {
                     continue;
                 };

diff --git a/src/tools/apt.rs b/src/tools/apt.rs
index 3e016cfe..3569a006 100644
--- a/src/tools/apt.rs
+++ b/src/tools/apt.rs
@@ -25,7 +25,7 @@ pub struct PkgState {

 pub fn write_pkg_cache(state: &PkgState) -> Result<(), Error> {
     let serialized_state = serde_json::to_string(state)?;

-    replace_file(APT_PKG_STATE_FN, &serialized_state.as_bytes(), CreateOptions::new(), false)
+    replace_file(APT_PKG_STATE_FN, serialized_state.as_bytes(), CreateOptions::new(), false)
         .map_err(|err| format_err!("Error writing package cache - {}", err))?;
     Ok(())
 }
@@ -206,7 +206,7 @@ pub fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
     drop(cache_iter);
     // also loop through missing dependencies, as they would be installed
     for pkg in depends.iter() {
-        let mut iter = cache.find_by_name(&pkg);
+        let mut iter = cache.find_by_name(pkg);
         let view = match iter.next() {
             Some(view) => view,
             None => continue // package not found, ignore
diff --git a/src/tools/disks/zfs.rs b/src/tools/disks/zfs.rs
index 2f18919d..3da4b603 100644
--- a/src/tools/disks/zfs.rs
+++ b/src/tools/disks/zfs.rs
@@ -20,7 +20,7 @@ lazy_static!{
 pub fn get_pool_from_dataset(dataset: &OsStr) -> Option<&OsStr> {
     if let Some(dataset) = dataset.to_str() {
         if let Some(idx) = dataset.find('/') {
-            return Some(&dataset[0..idx].as_ref());
+            return Some(dataset[0..idx].as_ref());
         }
     }

diff --git a/src/tools/disks/zpool_list.rs b/src/tools/disks/zpool_list.rs
index e68cfef8..09667dfc 100644
--- a/src/tools/disks/zpool_list.rs
+++ b/src/tools/disks/zpool_list.rs
@@ -157,13 +157,13 @@ fn test_zfs_parse_list() -> Result<(), Error> {

     let output = "";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = Vec::new();

     assert_eq!(data, expect);

     let output = "btest 427349245952 405504 427348840448 - - 0 0 1.00 ONLINE -\n";
-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: "btest".to_string(),
@@ -190,7 +190,7 @@ logs
 ";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: String::from("rpool"),
@@ -232,7 +232,7 @@ logs
 - - - - - - - - -
   /dev/sda5 213674622976 0 213674622976 - - 0 0 - ONLINE
 ";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: String::from("b-test"),
@@ -267,7 +267,7 @@ b.test 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
   /dev/sda1 - - - - - - - - ONLINE
 ";

-    let data = parse_zpool_list(&output)?;
+    let data = parse_zpool_list(output)?;
     let expect = vec![
         ZFSPoolInfo {
             name: String::from("b.test"),
diff --git a/src/tools/disks/zpool_status.rs b/src/tools/disks/zpool_status.rs
index b2d8e35b..4601b14f 100644
--- a/src/tools/disks/zpool_status.rs
+++ b/src/tools/disks/zpool_status.rs
@@ -189,7 +189,7 @@ pub fn parse_zpool_status_config_tree(i: &str) -> Result<Vec<ZFSPoolVDevState>,
 }

 fn parse_zpool_status(input: &str) -> Result<Vec<(String, String)>, Error> {
-    parse_complete("zfs status output", &input, many0(parse_zpool_status_field))
+    parse_complete("zfs status output", input, many0(parse_zpool_status_field))
 }

 pub fn vdev_list_to_tree(vdev_list: &[ZFSPoolVDevState]) -> Result<Value, Error> {
@@ -220,7 +220,7 @@ where
     };

     for item in items {
-        let (node, node_level) = to_node(&item);
+        let (node, node_level) = to_node(item);
         let vdev_level = 1 + node_level;
         let mut node = match node {
             Value::Object(map) => map,
@@ -373,7 +373,7 @@ pub fn zpool_status(pool: &str) -> Result<Vec<(String, String)>, Error> {
 fn test_parse(output: &str) -> Result<(), Error> {
     let mut found_config = false;

-    for (k, v) in parse_zpool_status(&output)? {
+    for (k, v) in parse_zpool_status(output)? {
         println!("<{}> => '{}'", k, v);
         if k == "config" {
             let vdev_list = parse_zpool_status_config_tree(&v)?;
diff --git a/src/tools/systemd/config.rs b/src/tools/systemd/config.rs
index f0e7f342..95c1a942 100644
--- a/src/tools/systemd/config.rs
+++ b/src/tools/systemd/config.rs
@@ -125,7 +125,7 @@ pub fn parse_systemd_mount(filename: &str) -> Result<SectionConfigData, Error> {
 }

 fn save_systemd_config(config: &SectionConfig, filename: &str, data: &SectionConfigData) -> Result<(), Error> {
-    let raw = config.write(filename, &data)?;
+    let raw = config.write(filename, data)?;

     let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
     // set the correct owner/group/permissions while saving file, owner(rw) = root