diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index bb20e149..86201b8e 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -470,7 +470,7 @@ impl BackupNamespace {
         Ok(child)
     }

-    /// Pop one level off the namespace hierachy
+    /// Pop one level off the namespace hierarchy
     pub fn pop(&mut self) -> Option<String> {
         let dropped = self.inner.pop();
         if let Some(ref dropped) = dropped {
diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs
index 9e1a1893..532632c8 100644
--- a/pbs-api-types/src/human_byte.rs
+++ b/pbs-api-types/src/human_byte.rs
@@ -69,7 +69,7 @@ impl SizeUnit {
     }
 }

-/// Returns the string repesentation
+/// Returns the string representation
 impl std::fmt::Display for SizeUnit {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs
index d29f18b4..3ed579cf 100644
--- a/pbs-api-types/src/traffic_control.rs
+++ b/pbs-api-types/src/traffic_control.rs
@@ -113,7 +113,7 @@ pub struct TrafficControlRule {
     #[serde(flatten)]
     pub limit: RateLimitConfig,
     // fixme: expose this?
-    // /// Bandwidth is shared accross all connections
+    // /// Bandwidth is shared across all connections
     // #[serde(skip_serializing_if="Option::is_none")]
     // pub shared: Option<bool>,
     /// Enable the rule at specific times
diff --git a/pbs-client/src/pxar/extract.rs b/pbs-client/src/pxar/extract.rs
index cb21d27d..9a8854c0 100644
--- a/pbs-client/src/pxar/extract.rs
+++ b/pbs-client/src/pxar/extract.rs
@@ -603,7 +603,7 @@ where
     let stripped_path = match realpath.strip_prefix(prefix) {
         Ok(path) => path,
         Err(_) => {
-            // outside of our tar archive, add the first occurrance to the tar
+            // outside of our tar archive, add the first occurrence to the tar
             if let Some(path) = hardlinks.get(realpath) {
                 path
             } else {
diff --git a/pbs-client/src/pxar/flags.rs b/pbs-client/src/pxar/flags.rs
index 78dab969..d46c8af3 100644
--- a/pbs-client/src/pxar/flags.rs
+++ b/pbs-client/src/pxar/flags.rs
@@ -179,7 +179,7 @@ use libc::c_long;
         | FS_PROJINHERIT_FL;
 }

-use fs_flags::*; // for code formating/rusfmt
+use fs_flags::*; // for code formatting/rusfmt

 #[rustfmt::skip]
 const CHATTR_MAP: [(Flags, c_long); 10] = [
diff --git a/pbs-config/src/acl.rs b/pbs-config/src/acl.rs
index f6ed1f41..357ae608 100644
--- a/pbs-config/src/acl.rs
+++ b/pbs-config/src/acl.rs
@@ -695,7 +695,7 @@ impl AclTree {
             return Ok(true);
         }

-        // now search trough the sub-tree
+        // now search through the sub-tree
         for (_comp, child) in node.children.iter() {
             if child.any_privs_below(auth_id, privs)? {
                 return Ok(true);
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 41dc41ff..3cd1d98f 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -195,7 +195,7 @@ impl DataStore {
     /// Open a datastore given a raw configuration.
     ///
     /// # Safety
-    /// There's no memory saftey implication, but as this is opening a new ChunkStore it will
+    /// There's no memory safety implication, but as this is opening a new ChunkStore it will
     /// create a new process locker instance, potentially on the same path as existing safely
     /// created ones. This is dangerous as dropping the reference of this and thus the underlying
     /// chunkstore's process locker will close all locks from our process on the config.path,
@@ -428,7 +428,7 @@ impl DataStore {
     /// Returns true if all the groups were removed, and false if some were protected.
     pub fn remove_namespace_groups(self: &Arc<Self>, ns: &BackupNamespace) -> Result<bool, Error> {
         // FIXME: locking? The single groups/snapshots are already protected, so may not be
-        // necesarry (depends on what we all allow to do with namespaces)
+        // necessary (depends on what we all allow to do with namespaces)
         log::info!("removing all groups in namespace {}:/{ns}", self.name());

         let mut removed_all_groups = true;
diff --git a/pbs-datastore/src/hierarchy.rs b/pbs-datastore/src/hierarchy.rs
index a5cc8f13..d5007b07 100644
--- a/pbs-datastore/src/hierarchy.rs
+++ b/pbs-datastore/src/hierarchy.rs
@@ -209,11 +209,11 @@ impl Iterator for ListNamespaces {
 /// A iterator for all Namespaces below an anchor namespace, most often that will be the
 /// `BackupNamespace::root()` one.
 ///
-/// Descends depth-first (pre-order) into the namespace hierachy yielding namespaces immediately as
+/// Descends depth-first (pre-order) into the namespace hierarchy yielding namespaces immediately as
 /// it finds them.
 ///
 /// Note: The anchor namespaces passed on creating the iterator will yielded as first element, this
-/// can be usefull for searching all backup groups from a certain anchor, as that can contain
+/// can be useful for searching all backup groups from a certain anchor, as that can contain
 /// sub-namespaces but also groups on its own level, so otherwise one would need to special case
 /// the ones from the own level.
 pub struct ListNamespacesRecursive {
diff --git a/pbs-tape/Cargo.toml b/pbs-tape/Cargo.toml
index 511c6bbe..dfe65778 100644
--- a/pbs-tape/Cargo.toml
+++ b/pbs-tape/Cargo.toml
@@ -3,7 +3,7 @@ name = "pbs-tape"
 version = "0.1.0"
 authors = ["Proxmox Support Team <support@proxmox.com>"]
 edition = "2018"
-description = "LTO tage support"
+description = "LTO tape support"

 [dependencies]
 lazy_static = "1.4"
diff --git a/pbs-tape/src/sg_tape.rs b/pbs-tape/src/sg_tape.rs
index 776ee06f..8ffd02dd 100644
--- a/pbs-tape/src/sg_tape.rs
+++ b/pbs-tape/src/sg_tape.rs
@@ -205,7 +205,7 @@ impl SgTape {
                 (has_format, is_worm)
             }
             Err(_) => {
-                // LTO3 and older do not supprt medium configuration mode page
+                // LTO3 and older do not support medium configuration mode page
                 (false, false)
             }
         };
diff --git a/pbs-tape/src/sgutils2.rs b/pbs-tape/src/sgutils2.rs
index c405f0df..db92277b 100644
--- a/pbs-tape/src/sgutils2.rs
+++ b/pbs-tape/src/sgutils2.rs
@@ -259,7 +259,7 @@ impl ModeParameterHeader {
 pub struct ModeBlockDescriptor {
     pub density_code: u8,
     pub number_of_blocks: [u8; 3],
-    reserverd: u8,
+    reserved: u8,
     pub block_length: [u8; 3],
 }

diff --git a/pbs-tools/src/lib.rs b/pbs-tools/src/lib.rs
index 289efbc4..aded6a01 100644
--- a/pbs-tools/src/lib.rs
+++ b/pbs-tools/src/lib.rs
@@ -16,9 +16,9 @@ pub mod async_lru_cache;
 /// with the allocation pattern from our/tokio's complex async machinery, resulted in very large
 /// RSS sizes due to defragmentation and long-living (smaller) allocation on top of the heap
 /// avoiding that the (big) now free'd allocations below couldn't get given back to the OS. This is
-/// not an issue with mmap'd memeory chunks, those can be given back at any time.
+/// not an issue with mmap'd memory chunks, those can be given back at any time.
 ///
-/// Lowering effective MMAP treshold to 128 KiB allows freeing up memory to the OS better and with
+/// Lowering effective MMAP threshold to 128 KiB allows freeing up memory to the OS better and with
 /// lower latency, which reduces the peak *and* average RSS size by an order of magnitude when
 /// running backup jobs. We measured a reduction by a factor of 10-20 in experiments and see much
 /// less erratic behavior in the overall's runtime RSS size.
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index ada555a2..6d0e533a 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -1040,7 +1040,7 @@ async fn create_backup(
     }

     if dry_run {
-        println!("dry-run: no upload happend");
+        println!("dry-run: no upload happened");
         return Ok(Value::Null);
     }

diff --git a/proxmox-file-restore/src/block_driver_qemu.rs b/proxmox-file-restore/src/block_driver_qemu.rs
index 362fff0d..d203d72a 100644
--- a/proxmox-file-restore/src/block_driver_qemu.rs
+++ b/proxmox-file-restore/src/block_driver_qemu.rs
@@ -153,7 +153,7 @@ async fn ensure_running(details: &SnapRestoreDetails) -> Result bool {
-        // Always return true here, so we have access to everthing
+        // Always return true here, so we have access to everything
         true
     }
     fn is_group_member(&self, _userid: &str, group: &str) -> bool {
diff --git a/proxmox-rest-server/src/api_config.rs b/proxmox-rest-server/src/api_config.rs
index 9d257fd1..c63106d3 100644
--- a/proxmox-rest-server/src/api_config.rs
+++ b/proxmox-rest-server/src/api_config.rs
@@ -42,7 +42,7 @@ impl ApiConfig {
     /// `api_auth` - The Authentication handler
     ///
     /// `get_index_fn` - callback to generate the root page
-    /// (index). Please note that this fuctions gets a reference to
+    /// (index). Please note that this functions gets a reference to
     /// the [ApiConfig], so it can use [Handlebars] templates
     /// ([render_template](Self::render_template) to generate pages.
     pub fn new<B: Into<PathBuf>>(
diff --git a/proxmox-rest-server/src/lib.rs b/proxmox-rest-server/src/lib.rs
index dc538a80..3872e565 100644
--- a/proxmox-rest-server/src/lib.rs
+++ b/proxmox-rest-server/src/lib.rs
@@ -104,7 +104,7 @@ lazy_static::lazy_static! {
     static ref PSTART: u64 = PidStat::read_from_pid(Pid::from_raw(*PID)).unwrap().starttime;
 }

-/// Retruns the current process ID (see [libc::getpid])
+/// Returns the current process ID (see [libc::getpid])
 ///
 /// The value is cached at startup (so it is invalid after a fork)
 pub(crate) fn pid() -> i32 {
diff --git a/proxmox-restore-daemon/src/main.rs b/proxmox-restore-daemon/src/main.rs
index b2c809fa..f7b94ef1 100644
--- a/proxmox-restore-daemon/src/main.rs
+++ b/proxmox-restore-daemon/src/main.rs
@@ -55,7 +55,7 @@ fn main() -> Result<(), Error> {
         );
     }

-    // don't have a real syslog (and no persistance), so use env_logger to print to a log file (via
+    // don't have a real syslog (and no persistence), so use env_logger to print to a log file (via
     // stdout to a serial terminal attached by QEMU)
     env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
         .write_style(env_logger::WriteStyle::Never)
@@ -153,7 +153,7 @@ fn accept_vsock_connections(
                 }
             }
             Err(err) => {
-                error!("error accepting vsock connetion: {}", err);
+                error!("error accepting vsock connection: {}", err);
             }
         }
     }
diff --git a/proxmox-rrd/src/rrd.rs b/proxmox-rrd/src/rrd.rs
index 41af6242..4ae3ee93 100644
--- a/proxmox-rrd/src/rrd.rs
+++ b/proxmox-rrd/src/rrd.rs
@@ -8,7 +8,7 @@
 //! ## Features
 //!
 //! * Well defined data format [CBOR](https://datatracker.ietf.org/doc/html/rfc8949)
-//! * Plattform independent (big endian f64, hopefully a standard format?)
+//! * Platform independent (big endian f64, hopefully a standard format?)
 //! * Arbitrary number of RRAs (dynamically changeable)

 use std::io::{Read, Write};
@@ -456,7 +456,7 @@ impl RRD {
     /// This selects the RRA with specified [CF] and (minimum)
     /// resolution, and extract data from `start` to `end`.
     ///
-    /// `start`: Start time. If not sepecified, we simply extract 10 data points.
+    /// `start`: Start time. If not specified, we simply extract 10 data points.
     /// `end`: End time. Default is to use the current time.
     pub fn extract_data(
         &self,
@@ -600,7 +600,7 @@ mod tests {
         assert_eq!(reso, 60);
         assert_eq!(data, [Some(6.5), Some(8.5), Some(10.5), Some(12.5), None]);

-        // add much newer vaule (should delete all previous/outdated value)
+        // add much newer value (should delete all previous/outdated value)
         let i = 100;
         rrd.update((i as f64) * 30.0, i as f64);
         println!("TEST {:?}", serde_json::to_string_pretty(&rrd));