diff --git a/src/api2/access/tfa.rs b/src/api2/access/tfa.rs
index 0127f54c..d75b7f95 100644
--- a/src/api2/access/tfa.rs
+++ b/src/api2/access/tfa.rs
@@ -181,7 +181,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result {
     if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
         match {
-            // scope to prevent the temprary iter from borrowing across the whole match
+            // scope to prevent the temporary iter from borrowing across the whole match
             let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
             entry.map(|(ty, index, _)| (ty, index))
         } {
@@ -259,7 +259,7 @@ fn delete_tfa(
         .ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
 
     match {
-        // scope to prevent the temprary iter from borrowing across the whole match
+        // scope to prevent the temporary iter from borrowing across the whole match
         let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
         entry.map(|(ty, index, _)| (ty, index))
     } {
diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs
index d619bf8d..f673e2ca 100644
--- a/src/api2/admin/sync.rs
+++ b/src/api2/admin/sync.rs
@@ -1,4 +1,4 @@
-//! Datastore Syncronization Job Management
+//! Datastore Synchronization Job Management
 
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
diff --git a/src/api2/config/tape_encryption_keys.rs b/src/api2/config/tape_encryption_keys.rs
index 46781130..31be0ed1 100644
--- a/src/api2/config/tape_encryption_keys.rs
+++ b/src/api2/config/tape_encryption_keys.rs
@@ -119,7 +119,7 @@ pub fn change_passphrase(
     let kdf = kdf.unwrap_or_default();
 
     if let Kdf::None = kdf {
-        bail!("Please specify a key derivation funktion (none is not allowed here).");
+        bail!("Please specify a key derivation function (none is not allowed here).");
     }
 
     let _lock = open_file_locked(
@@ -187,7 +187,7 @@ pub fn create_key(
     let kdf = kdf.unwrap_or_default();
 
     if let Kdf::None = kdf {
-        bail!("Please specify a key derivation funktion (none is not allowed here).");
+        bail!("Please specify a key derivation function (none is not allowed here).");
     }
 
     let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
diff --git a/src/api2/node/apt.rs b/src/api2/node/apt.rs
index 9d723f17..e77b89fa 100644
--- a/src/api2/node/apt.rs
+++ b/src/api2/node/apt.rs
@@ -85,7 +85,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
         },
         notify: {
             type: bool,
-            description: r#"Send notification mail about new package updates availanle to the
+            description: r#"Send notification mail about new package updates available to the
                 email address configured for 'root@pam')."#,
             default: false,
             optional: true,
diff --git a/src/api2/tape/drive.rs b/src/api2/tape/drive.rs
index db9b919e..9e86d8f8 100644
--- a/src/api2/tape/drive.rs
+++ b/src/api2/tape/drive.rs
@@ -220,7 +220,7 @@ pub async fn load_slot(drive: String, source_slot: u64) -> Result<(), Error> {
         },
     },
     returns: {
-        description: "The import-export slot number the media was transfered to.",
+        description: "The import-export slot number the media was transferred to.",
        type: u64,
        minimum: 1,
     },
@@ -782,7 +782,7 @@ pub fn clean_drive(
             }
         }
 
-        worker.log("Drive cleaned sucessfully");
+        worker.log("Drive cleaned successfully");
 
         Ok(())
     },
@@ -943,7 +943,7 @@ pub fn update_inventory(
                 }
                 Ok((Some(media_id), _key_config)) => {
                     if label_text != media_id.label.label_text {
-                        worker.warn(format!("label text missmatch ({} != {})", label_text, media_id.label.label_text));
+                        worker.warn(format!("label text mismatch ({} != {})", label_text, media_id.label.label_text));
                        continue;
                    }
                    worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));
diff --git a/src/api2/tape/media.rs b/src/api2/tape/media.rs
index ecfe41bf..963ae9bc 100644
--- a/src/api2/tape/media.rs
+++ b/src/api2/tape/media.rs
@@ -497,7 +497,7 @@ pub fn get_media_status(uuid: Uuid) -> Result {
 /// Update media status (None, 'full', 'damaged' or 'retired')
 ///
 /// It is not allowed to set status to 'writable' or 'unknown' (those
-/// are internaly managed states).
+/// are internally managed states).
 pub fn update_media_status(uuid: Uuid, status: Option) -> Result<(), Error> {
 
     let status_path = Path::new(TAPE_STATUS_DIR);
diff --git a/src/api2/types/mod.rs b/src/api2/types/mod.rs
index b72d54c1..3e720dad 100644
--- a/src/api2/types/mod.rs
+++ b/src/api2/types/mod.rs
@@ -1272,7 +1272,7 @@ pub struct APTUpdateInfo {
 pub enum Notify {
     /// Never send notification
     Never,
-    /// Send notifications for failed and sucessful jobs
+    /// Send notifications for failed and successful jobs
     Always,
     /// Send notifications for failed jobs only
     Error,
diff --git a/src/api2/types/tape/device.rs b/src/api2/types/tape/device.rs
index 811cd36f..368a0015 100644
--- a/src/api2/types/tape/device.rs
+++ b/src/api2/types/tape/device.rs
@@ -21,7 +21,7 @@ pub struct OptionalDeviceIdentification {
 #[api()]
 #[derive(Debug,Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
-/// Kind of devive
+/// Kind of device
 pub enum DeviceKind {
     /// Tape changer (Autoloader, Robot)
     Changer,
diff --git a/src/backup.rs b/src/backup.rs
index 62a40dde..cca43881 100644
--- a/src/backup.rs
+++ b/src/backup.rs
@@ -75,7 +75,7 @@
 //!
 //! Since PBS allows multiple potentially interfering operations at the
 //! same time (e.g. garbage collect, prune, multiple backup creations
-//! (only in seperate groups), forget, ...), these need to lock against
+//! (only in separate groups), forget, ...), these need to lock against
 //! each other in certain scenarios. There is no overarching global lock
 //! though, instead always the finest grained lock possible is used,
 //! because running these operations concurrently is treated as a feature
diff --git a/src/backup/chunk_store.rs b/src/backup/chunk_store.rs
index cc4a9435..31e8307c 100644
--- a/src/backup/chunk_store.rs
+++ b/src/backup/chunk_store.rs
@@ -452,7 +452,7 @@ impl ChunkStore {
 #[test]
 fn test_chunk_store1() {
 
-    let mut path = std::fs::canonicalize(".").unwrap(); // we need absulute path
+    let mut path = std::fs::canonicalize(".").unwrap(); // we need absolute path
     path.push(".testdir");
 
     if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index f8298ae8..28dda7e7 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -448,7 +448,7 @@ impl DataStore {
             if !self.chunk_store.cond_touch_chunk(digest, false)? {
                 crate::task_warn!(
                     worker,
-                    "warning: unable to access non-existant chunk {}, required by {:?}",
+                    "warning: unable to access non-existent chunk {}, required by {:?}",
                     proxmox::tools::digest_to_hex(digest),
                     file_name,
                 );
diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs
index 1b8b5bec..5aae0873 100644
--- a/src/bin/proxmox-backup-client.rs
+++ b/src/bin/proxmox-backup-client.rs
@@ -1453,7 +1453,7 @@ fn parse_archive_type(name: &str) -> (String, ArchiveType) {
             type: String,
             description: r###"Target directory path. Use '-' to write to standard output.
 
-We do not extraxt '.pxar' archives when writing to standard output.
+We do not extract '.pxar' archives when writing to standard output.
 
 "###
         },
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index 68934c28..105a11f8 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -330,7 +330,7 @@ async fn get_versions(verbose: bool, param: Value) -> Result {
 
     let options = default_table_format_options()
         .disable_sort()
-        .noborder(true) // just not helpfull for version info which gets copy pasted often
+        .noborder(true) // just not helpful for version info which gets copy pasted often
         .column(ColumnConfig::new("Package"))
         .column(ColumnConfig::new("Version"))
         .column(ColumnConfig::new("ExtraInfo").header("Extra Info"))
diff --git a/src/bin/proxmox_backup_client/key.rs b/src/bin/proxmox_backup_client/key.rs
index 6e18a026..76b135a2 100644
--- a/src/bin/proxmox_backup_client/key.rs
+++ b/src/bin/proxmox_backup_client/key.rs
@@ -527,7 +527,7 @@ fn show_master_pubkey(path: Option, param: Value) -> Result<(), Error> {
             optional: true,
         },
         subject: {
-            description: "Include the specified subject as titel text.",
+            description: "Include the specified subject as title text.",
            optional: true,
        },
        "output-format": {
diff --git a/src/bin/proxmox_backup_client/mount.rs b/src/bin/proxmox_backup_client/mount.rs
index 24100752..be6aca05 100644
--- a/src/bin/proxmox_backup_client/mount.rs
+++ b/src/bin/proxmox_backup_client/mount.rs
@@ -140,7 +140,7 @@ fn mount(
         return proxmox_backup::tools::runtime::main(mount_do(param, None));
     }
 
-    // Process should be deamonized.
+    // Process should be daemonized.
     // Make sure to fork before the async runtime is instantiated to avoid troubles.
     let (pr, pw) = proxmox_backup::tools::pipe()?;
     match unsafe { fork() } {
diff --git a/src/bin/proxmox_tape/encryption_key.rs b/src/bin/proxmox_tape/encryption_key.rs
index 9177a377..907d2d63 100644
--- a/src/bin/proxmox_tape/encryption_key.rs
+++ b/src/bin/proxmox_tape/encryption_key.rs
@@ -84,7 +84,7 @@ pub fn encryption_key_commands() -> CommandLineInterface {
                 schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
             },
             subject: {
-                description: "Include the specified subject as titel text.",
+                description: "Include the specified subject as title text.",
                optional: true,
            },
            "output-format": {
@@ -128,7 +128,7 @@ fn paper_key(
         },
     },
 )]
-/// Print tthe encryption key's metadata.
+/// Print the encryption key's metadata.
 fn show_key(
     param: Value,
     rpcenv: &mut dyn RpcEnvironment,
diff --git a/src/bin/sg-tape-cmd.rs b/src/bin/sg-tape-cmd.rs
index 475b2cbb..86998972 100644
--- a/src/bin/sg-tape-cmd.rs
+++ b/src/bin/sg-tape-cmd.rs
@@ -1,6 +1,6 @@
 /// Tape command implemented using scsi-generic raw commands
 ///
-/// SCSI-generic command needs root priviledges, so this binary need
+/// SCSI-generic command needs root privileges, so this binary needs
 /// to be setuid root.
 ///
 /// This command can use STDIN as tape device handle.
diff --git a/src/buildcfg.rs b/src/buildcfg.rs
index 9aff8b4b..4f333288 100644
--- a/src/buildcfg.rs
+++ b/src/buildcfg.rs
@@ -16,11 +16,11 @@ pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
 /// namespaced directory for persistent logging
 pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();
 
-/// logfile for all API reuests handled by the proxy and privileged API daemons. Note that not all
+/// logfile for all API requests handled by the proxy and privileged API daemons. Note that not all
 /// failed logins can be logged here with full information, use the auth log for that.
 pub const API_ACCESS_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/access.log");
 
-/// logfile for any failed authentication, via ticket or via token, and new successfull ticket
+/// logfile for any failed authentication, via ticket or via token, and new successful ticket
 /// creations. This file can be useful for fail2ban.
 pub const API_AUTH_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/auth.log");
diff --git a/src/client/backup_writer.rs b/src/client/backup_writer.rs
index 01ea7704..cef7edef 100644
--- a/src/client/backup_writer.rs
+++ b/src/client/backup_writer.rs
@@ -509,7 +509,7 @@ impl BackupWriter {
     }
 
     // We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other
-    // funciton in the same path is `wid`, so those 3 could be in a struct, but there's no real use
+    // function in the same path is `wid`, so those 3 could be in a struct, but there's no real use
     // since this is a private method.
     #[allow(clippy::too_many_arguments)]
     fn upload_chunk_info_stream(
diff --git a/src/client/vsock_client.rs b/src/client/vsock_client.rs
index ce3f7bc7..d78f2a8a 100644
--- a/src/client/vsock_client.rs
+++ b/src/client/vsock_client.rs
@@ -86,7 +86,7 @@ impl tower_service::Service for VsockConnector {
             Ok(connection)
         })
 
-        // unravel the thread JoinHandle to a useable future
+        // unravel the thread JoinHandle to a usable future
         .map(|res| match res {
             Ok(res) => res,
             Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),
diff --git a/src/config/network/helper.rs b/src/config/network/helper.rs
index 0449caac..2ec0c1ea 100644
--- a/src/config/network/helper.rs
+++ b/src/config/network/helper.rs
@@ -82,7 +82,7 @@ pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
     Ok(())
 }
 
-// parse ip address with otional cidr mask
+// parse ip address with optional cidr mask
 pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option, bool), Error> {
 
     lazy_static! {
diff --git a/src/config/tape_encryption_keys.rs b/src/config/tape_encryption_keys.rs
index d9a83afb..42c3184d 100644
--- a/src/config/tape_encryption_keys.rs
+++ b/src/config/tape_encryption_keys.rs
@@ -4,10 +4,10 @@
 //! indexed by key fingerprint.
 //!
 //! We store the plain key (unencrypted), as well as a encrypted
-//! version protected by passowrd (see struct `KeyConfig`)
+//! version protected by password (see struct `KeyConfig`)
 //!
 //! Tape backups store the password protected version on tape, so that
-//! it is possible to retore the key from tape if you know the
+//! it is possible to restore the key from tape if you know the
 //! password.
 
 use std::collections::HashMap;
diff --git a/src/config/tfa.rs b/src/config/tfa.rs
index 7c656d20..6b65f6a5 100644
--- a/src/config/tfa.rs
+++ b/src/config/tfa.rs
@@ -590,7 +590,7 @@ impl TfaUserChallengeData {
     }
 
     /// Save the current data. Note that we do not replace the file here since we lock the file
-    /// itself, as it is in `/run`, and the typicall error case for this particular situation
+    /// itself, as it is in `/run`, and the typical error case for this particular situation
     /// (machine loses power) simply prevents some login, but that'll probably fail anyway for
     /// other reasons then...
     ///
diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs
index f09a8931..306e84d0 100644
--- a/src/server/email_notifications.rs
+++ b/src/server/email_notifications.rs
@@ -43,7 +43,7 @@ Deduplication Factor: {{deduplication-factor}}
 
 Garbage collection successful.
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
@@ -57,7 +57,7 @@ Datastore: {{datastore}}
 
 Garbage collection failed: {{error}}
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
@@ -71,7 +71,7 @@ Datastore: {{job.store}}
 
 Verification successful.
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
@@ -89,7 +89,7 @@ Verification failed on these snapshots/groups:
 
 {{/each}}
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
@@ -105,7 +105,7 @@ Remote Store: {{job.remote-store}}
 
 Synchronization successful.
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
@@ -121,7 +121,7 @@ Remote Store: {{job.remote-store}}
 
 Synchronization failed: {{error}}
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
@@ -152,7 +152,7 @@ Tape Drive: {{job.drive}}
 
 Tape Backup successful.
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
@@ -171,7 +171,7 @@ Tape Drive: {{job.drive}}
 
 Tape Backup failed: {{error}}
 
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
 
diff --git a/src/server/worker_task.rs b/src/server/worker_task.rs
index 3e2887f5..1e8e009f 100644
--- a/src/server/worker_task.rs
+++ b/src/server/worker_task.rs
@@ -749,7 +749,7 @@ impl WorkerTask {
             match data.abort_listeners.pop() {
                 None => { break; },
                 Some(ch) => {
-                    let _ = ch.send(()); // ignore erros here
+                    let _ = ch.send(()); // ignore errors here
                 },
             }
         }
diff --git a/src/tape/changer/mod.rs b/src/tape/changer/mod.rs
index 75b72131..1e58a437 100644
--- a/src/tape/changer/mod.rs
+++ b/src/tape/changer/mod.rs
@@ -35,7 +35,7 @@ use crate::api2::types::{
 /// Changer element status.
 ///
 /// Drive and slots may be `Empty`, or contain some media, either
-/// with knwon volume tag `VolumeTag(String)`, or without (`Full`).
+/// with known volume tag `VolumeTag(String)`, or without (`Full`).
 #[derive(Serialize, Deserialize, Debug)]
 pub enum ElementStatus {
     Empty,
@@ -87,7 +87,7 @@ pub struct MtxStatus {
     pub drives: Vec,
     /// List of known storage slots
     pub slots: Vec,
-    /// Tranport elements
+    /// Transport elements
     ///
     /// Note: Some libraries do not report transport elements.
     pub transports: Vec,
@@ -261,7 +261,7 @@ pub trait MediaChange {
     /// List online media labels (label_text/barcodes)
     ///
-    /// List acessible (online) label texts. This does not include
+    /// List accessible (online) label texts. This does not include
     /// media inside import-export slots or cleaning media.
     fn online_media_label_texts(&mut self) -> Result, Error> {
         let status = self.status()?;
@@ -378,7 +378,7 @@ pub trait MediaChange {
     /// Unload media to a free storage slot
     ///
-    /// If posible to the slot it was previously loaded from.
+    /// If possible to the slot it was previously loaded from.
     ///
     /// Note: This method consumes status - so please use returned status afterward.
     fn unload_to_free_slot(&mut self, status: MtxStatus) -> Result {
diff --git a/src/tape/changer/mtx/mod.rs b/src/tape/changer/mtx/mod.rs
index 6ede17df..dc3a3aee 100644
--- a/src/tape/changer/mtx/mod.rs
+++ b/src/tape/changer/mtx/mod.rs
@@ -1,4 +1,4 @@
-//! Wrapper around expernal `mtx` command line tool
+//! Wrapper around external `mtx` command line tool
 
 mod parse_mtx_status;
 pub use parse_mtx_status::*;
diff --git a/src/tape/changer/sg_pt_changer.rs b/src/tape/changer/sg_pt_changer.rs
index b2be9548..ed63b615 100644
--- a/src/tape/changer/sg_pt_changer.rs
+++ b/src/tape/changer/sg_pt_changer.rs
@@ -246,7 +246,7 @@ pub fn unload(
     Ok(())
 }
 
-/// Tranfer medium from one storage slot to another
+/// Transfer medium from one storage slot to another
 pub fn transfer_medium(
     file: &mut F,
     from_slot: u64,
@@ -362,7 +362,7 @@ pub fn read_element_status(file: &mut F) -> Result
         bail!("got wrong number of import/export elements");
     }
     if (setup.transfer_element_count as usize) != drives.len() {
-        bail!("got wrong number of tranfer elements");
+        bail!("got wrong number of transfer elements");
     }
 
     // create same virtual slot order as mtx(1)
@@ -428,7 +428,7 @@ struct SubHeader {
     element_type_code: u8,
     flags: u8,
     descriptor_length: u16,
-    reseved: u8,
+    reserved: u8,
     byte_count_of_descriptor_data_available: [u8;3],
 }
diff --git a/src/tape/drive/encryption.rs b/src/tape/drive/encryption.rs
index 1d9a50ae..7b687f2c 100644
--- a/src/tape/drive/encryption.rs
+++ b/src/tape/drive/encryption.rs
@@ -196,7 +196,7 @@ struct SspDataEncryptionCapabilityPage {
     page_code: u16,
     page_len: u16,
     extdecc_cfgp_byte: u8,
-    reserverd: [u8; 15],
+    reserved: [u8; 15],
 }
 
 #[derive(Endian)]
@@ -241,13 +241,13 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result {
         let desc: SspDataEncryptionAlgorithmDescriptor = unsafe { reader.read_be_value()? };
         if desc.descriptor_len != 0x14 {
-            bail!("got wrong key descriptior len");
+            bail!("got wrong key descriptor len");
         }
         if (desc.control_byte_4 & 0b00000011) != 2 {
-            continue; // cant encrypt in hardware
+            continue; // can't encrypt in hardware
         }
         if ((desc.control_byte_4 & 0b00001100) >> 2) != 2 {
-            continue; // cant decrypt in hardware
+            continue; // can't decrypt in hardware
         }
         if desc.algorithm_code == 0x00010014 && desc.key_size == 32 {
             aes_cgm_index = Some(desc.algorythm_index);
@@ -276,7 +276,7 @@ struct SspDataEncryptionStatusPage {
     control_byte: u8,
     key_format: u8,
     key_len: u16,
-    reserverd: [u8; 8],
+    reserved: [u8; 8],
 }
 
 fn decode_spin_data_encryption_status(data: &[u8]) -> Result {
diff --git a/src/tape/drive/mam.rs b/src/tape/drive/mam.rs
index a90f1655..cbb377d3 100644
--- a/src/tape/drive/mam.rs
+++ b/src/tape/drive/mam.rs
@@ -72,14 +72,14 @@ static MAM_ATTRIBUTES: &[ (u16, u16, MamFormat, &str) ] = &[
     (0x08_02, 8, MamFormat::ASCII, "Application Version"),
     (0x08_03, 160, MamFormat::ASCII, "User Medium Text Label"),
     (0x08_04, 12, MamFormat::ASCII, "Date And Time Last Written"),
-    (0x08_05, 1, MamFormat::BINARY, "Text Localization Identifer"),
+    (0x08_05, 1, MamFormat::BINARY, "Text Localization Identifier"),
     (0x08_06, 32, MamFormat::ASCII, "Barcode"),
     (0x08_07, 80, MamFormat::ASCII, "Owning Host Textual Name"),
     (0x08_08, 160, MamFormat::ASCII, "Media Pool"),
     (0x08_0B, 16, MamFormat::ASCII, "Application Format Version"),
     (0x08_0C, 50, MamFormat::ASCII, "Volume Coherency Information"),
-    (0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifer"),
-    (0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifer"),
+    (0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifier"),
+    (0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifier"),
 
     (0x10_00, 28, MamFormat::BINARY, "Unique Cartridge Identify (UCI)"),
     (0x10_01, 24, MamFormat::BINARY, "Alternate Unique Cartridge Identify (Alt-UCI)"),
diff --git a/src/tape/drive/mod.rs b/src/tape/drive/mod.rs
index c3021dc3..a386abf8 100644
--- a/src/tape/drive/mod.rs
+++ b/src/tape/drive/mod.rs
@@ -209,7 +209,7 @@ pub trait TapeDriver {
     /// Set or clear encryption key
     ///
     /// We use the media_set_uuid to XOR the secret key with the
-    /// uuid (first 16 bytes), so that each media set uses an uique
+    /// uuid (first 16 bytes), so that each media set uses a unique
     /// key for encryption.
     fn set_encryption(
         &mut self,
@@ -465,7 +465,7 @@ pub fn request_and_load_media(
     }
 }
 
-/// Aquires an exclusive lock for the tape device
+/// Acquires an exclusive lock for the tape device
 ///
 /// Basically calls lock_device_path() using the configured drive path.
 pub fn lock_tape_device(
@@ -539,7 +539,7 @@ fn tape_device_path(
 
 pub struct DeviceLockGuard(std::fs::File);
 
-// Aquires an exclusive lock on `device_path`
+// Acquires an exclusive lock on `device_path`
 //
 // Uses systemd escape_unit to compute a file name from `device_path`, the try
 // to lock `/var/lock/`.
diff --git a/src/tape/drive/virtual_tape.rs b/src/tape/drive/virtual_tape.rs
index c3367790..6dcf31fb 100644
--- a/src/tape/drive/virtual_tape.rs
+++ b/src/tape/drive/virtual_tape.rs
@@ -429,7 +429,7 @@ impl MediaChange for VirtualTapeHandle {
     }
 
     fn transfer_media(&mut self, _from: u64, _to: u64) -> Result {
-        bail!("media tranfer is not implemented!");
+        bail!("media transfer is not implemented!");
     }
 
     fn export_media(&mut self, _label_text: &str) -> Result, Error> {
diff --git a/src/tape/file_formats/blocked_reader.rs b/src/tape/file_formats/blocked_reader.rs
index bfb84f3d..3ef7e7f4 100644
--- a/src/tape/file_formats/blocked_reader.rs
+++ b/src/tape/file_formats/blocked_reader.rs
@@ -77,7 +77,7 @@ impl BlockedReader {
 
         if seq_nr != buffer.seq_nr() {
             proxmox::io_bail!(
-                "detected tape block with wrong seqence number ({} != {})",
+                "detected tape block with wrong sequence number ({} != {})",
                 seq_nr, buffer.seq_nr())
         }
 
diff --git a/src/tape/file_formats/chunk_archive.rs b/src/tape/file_formats/chunk_archive.rs
index fe0780fc..a2edbdd6 100644
--- a/src/tape/file_formats/chunk_archive.rs
+++ b/src/tape/file_formats/chunk_archive.rs
@@ -25,7 +25,7 @@ use crate::tape::{
 ///
 /// A chunk archive consists of a `MediaContentHeader` followed by a
 /// list of chunks entries. Each chunk entry consists of a
-/// `ChunkArchiveEntryHeader` folowed by the chunk data (`DataBlob`).
+/// `ChunkArchiveEntryHeader` followed by the chunk data (`DataBlob`).
 ///
 /// `| MediaContentHeader | ( ChunkArchiveEntryHeader | DataBlob )* |`
 pub struct ChunkArchiveWriter<'a> {
@@ -153,7 +153,7 @@ impl ChunkArchiveDecoder {
         Self { reader }
     }
 
-    /// Allow access to the underyling reader
+    /// Allow access to the underlying reader
     pub fn reader(&self) -> &R {
         &self.reader
     }
diff --git a/src/tape/file_formats/snapshot_archive.rs b/src/tape/file_formats/snapshot_archive.rs
index 5dc81706..e4d82abe 100644
--- a/src/tape/file_formats/snapshot_archive.rs
+++ b/src/tape/file_formats/snapshot_archive.rs
@@ -21,7 +21,7 @@ use crate::tape::{
 ///
 /// This ignores file attributes like ACLs and xattrs.
 ///
-/// Returns `Ok(Some(content_uuid))` on succees, and `Ok(None)` if
+/// Returns `Ok(Some(content_uuid))` on success, and `Ok(None)` if
 /// `LEOM` was detected before all data was written. The stream is
 /// marked inclomplete in that case and does not contain all data (The
 /// backup task must rewrite the whole file on the next media).
diff --git a/src/tape/helpers/snapshot_reader.rs b/src/tape/helpers/snapshot_reader.rs
index 5789fb26..c8deb58a 100644
--- a/src/tape/helpers/snapshot_reader.rs
+++ b/src/tape/helpers/snapshot_reader.rs
@@ -85,7 +85,7 @@ impl SnapshotReader {
         Ok(file)
     }
 
-    /// Retunrs an iterator for all used chunks.
+    /// Returns an iterator for all used chunks.
     pub fn chunk_iterator(&self) -> Result {
         SnapshotChunkIterator::new(&self)
     }
diff --git a/src/tape/inventory.rs b/src/tape/inventory.rs
index 5b731ace..7ae9d565 100644
--- a/src/tape/inventory.rs
+++ b/src/tape/inventory.rs
@@ -561,7 +561,7 @@ impl Inventory {
 
     // Helpers to simplify testing
 
-    /// Genreate and insert a new free tape (test helper)
+    /// Generate and insert a new free tape (test helper)
     pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid {
 
         let label = MediaLabel {
@@ -576,7 +576,7 @@ impl Inventory {
         uuid
     }
 
-    /// Genreate and insert a new tape assigned to a specific pool
+    /// Generate and insert a new tape assigned to a specific pool
     /// (test helper)
     pub fn generate_assigned_tape(
         &mut self,
@@ -600,7 +600,7 @@ impl Inventory {
         uuid
     }
 
-    /// Genreate and insert a used tape (test helper)
+    /// Generate and insert a used tape (test helper)
     pub fn generate_used_tape(
         &mut self,
         label_text: &str,
diff --git a/src/tape/media_pool.rs b/src/tape/media_pool.rs
index 6b8b449b..a249da34 100644
--- a/src/tape/media_pool.rs
+++ b/src/tape/media_pool.rs
@@ -3,7 +3,7 @@
 //! A set of backup medias.
 //!
 //! This struct manages backup media state during backup. The main
-//! purpose is to allocate media sets and assing new tapes to it.
+//! purpose is to allocate media sets and assign new tapes to it.
 //!
 //!
 
@@ -137,7 +137,7 @@ impl MediaPool {
         &self.name
     }
 
-    /// Retruns encryption settings
+    /// Returns encryption settings
     pub fn encrypt_fingerprint(&self) -> Option {
         self.encrypt_fingerprint.clone()
     }
@@ -286,7 +286,7 @@ impl MediaPool {
         Ok(list)
     }
 
-    // tests if the media data is considered as expired at sepcified time
+    // tests if the media data is considered as expired at specified time
     pub fn media_is_expired(&self, media: &BackupMedia, current_time: i64) -> bool {
         if media.status() != &MediaStatus::Full {
             return false;
diff --git a/src/tape/media_set.rs b/src/tape/media_set.rs
index 0cef10b7..5568e7f6 100644
--- a/src/tape/media_set.rs
+++ b/src/tape/media_set.rs
@@ -48,7 +48,7 @@ impl MediaSet {
         let seq_nr = seq_nr as usize;
         if self.media_list.len() > seq_nr {
             if self.media_list[seq_nr].is_some() {
-                bail!("found duplicate squence number in media set '{}/{}'",
+                bail!("found duplicate sequence number in media set '{}/{}'",
                       self.uuid.to_string(), seq_nr);
             }
         } else {
diff --git a/src/tape/pool_writer.rs b/src/tape/pool_writer.rs
index 7ea42ee0..15c4b054 100644
--- a/src/tape/pool_writer.rs
+++ b/src/tape/pool_writer.rs
@@ -271,7 +271,7 @@ impl PoolWriter {
         }
     }
 
-    /// Move to EOM (if not aleady there), then creates a new snapshot
+    /// Move to EOM (if not already there), then creates a new snapshot
     /// archive writing specified files (as .pxar) into it. On
     /// success, this return 'Ok(true)' and the media catalog gets
     /// updated.
@@ -330,7 +330,7 @@ impl PoolWriter {
         Ok((done, bytes_written))
     }
 
-    /// Move to EOM (if not aleady there), then creates a new chunk
+    /// Move to EOM (if not already there), then creates a new chunk
     /// archive and writes chunks from 'chunk_iter'. This stops when
     /// it detect LEOM or when we reach max archive size
     /// (4GB). Written chunks are registered in the media catalog.
diff --git a/src/tape/tape_write.rs b/src/tape/tape_write.rs
index 7e354c88..8a3d4fd6 100644
--- a/src/tape/tape_write.rs
+++ b/src/tape/tape_write.rs
@@ -67,7 +67,7 @@ pub trait TapeWrite {
 ///
 /// See: https://github.com/torvalds/linux/blob/master/Documentation/scsi/st.rst
 ///
-/// On sucess, this returns if we en countered a EOM condition.
+/// On success, this returns whether we encountered an EOM condition.
 pub fn tape_device_write_block(
     writer: &mut W,
     data: &[u8],
diff --git a/src/tape/test/alloc_writable_media.rs b/src/tape/test/alloc_writable_media.rs
index 8fc8f532..e293ec49 100644
--- a/src/tape/test/alloc_writable_media.rs
+++ b/src/tape/test/alloc_writable_media.rs
@@ -173,7 +173,7 @@ fn test_alloc_writable_media_4() -> Result<(), Error> {
     // next call fail because there is no free media
     assert!(pool.alloc_writable_media(start_time + 5).is_err());
 
-    // Create new nedia set, so that previous set can expire
+    // Create new media set, so that previous set can expire
     pool.start_write_session(start_time + 10)?;
 
     assert!(pool.alloc_writable_media(start_time + 10).is_err());
diff --git a/src/tools/lru_cache.rs b/src/tools/lru_cache.rs
index ecd15ba6..70289d3f 100644
--- a/src/tools/lru_cache.rs
+++ b/src/tools/lru_cache.rs
@@ -302,7 +302,7 @@ impl LinkedList {
         }
     }
 
-    /// Remove the node referenced by `node_ptr` from the linke list and return it.
+    /// Remove the node referenced by `node_ptr` from the linked list and return it.
     fn remove(&mut self, node_ptr: *mut CacheNode) -> Box> {
         let node = unsafe { Box::from_raw(node_ptr) };
 
diff --git a/src/tools/parallel_handler.rs b/src/tools/parallel_handler.rs
index f2480b2f..76f761c4 100644
--- a/src/tools/parallel_handler.rs
+++ b/src/tools/parallel_handler.rs
@@ -138,10 +138,10 @@ impl ParallelHandler {
             if let Err(panic) = handle.join() {
                 match panic.downcast::<&str>() {
                     Ok(panic_msg) => msg_list.push(
-                        format!("thread {} ({}) paniced: {}", self.name, i, panic_msg)
+                        format!("thread {} ({}) panicked: {}", self.name, i, panic_msg)
                     ),
                     Err(_) => msg_list.push(
-                        format!("thread {} ({}) paniced", self.name, i)
+                        format!("thread {} ({}) panicked", self.name, i)
                     ),
                 }
             }
diff --git a/src/tools/sgutils2.rs b/src/tools/sgutils2.rs
index 94943c28..52352b15 100644
--- a/src/tools/sgutils2.rs
+++ b/src/tools/sgutils2.rs
@@ -4,7 +4,7 @@
 //!
 //! See: `/usr/include/scsi/sg_pt.h`
 //!
-//! The SCSI Commands Reference Manual also contains some usefull information.
+//! The SCSI Commands Reference Manual also contains some useful information.
 
 use std::os::unix::io::AsRawFd;
 use std::ptr::NonNull;
diff --git a/src/tools/subscription.rs b/src/tools/subscription.rs
index 4d5caf39..07c6b40f 100644
--- a/src/tools/subscription.rs
+++ b/src/tools/subscription.rs
@@ -210,7 +210,7 @@ fn test_parse_register_response() -> Result<(), Error> {
     Ok(())
 }
 
-/// querys the up to date subscription status and parses the response
+/// queries the up to date subscription status and parses the response
 pub fn check_subscription(key: String, server_id: String) -> Result {
 
     let now = proxmox::tools::time::epoch_i64();
@@ -299,7 +299,7 @@ pub fn delete_subscription() -> Result<(), Error> {
     Ok(())
 }
 
-/// updates apt authenification for repo access
+/// updates apt authentication for repo access
 pub fn update_apt_auth(key: Option, password: Option) -> Result<(), Error> {
     let auth_conf = std::path::Path::new(APT_AUTH_FN);
     match (key, password) {