tree wide: typo fixes through codespell
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
commit 74cad4a8bd
parent 1e319bcb06
@@ -470,7 +470,7 @@ impl BackupNamespace {
         Ok(child)
     }
 
-    /// Pop one level off the namespace hierachy
+    /// Pop one level off the namespace hierarchy
     pub fn pop(&mut self) -> Option<String> {
         let dropped = self.inner.pop();
         if let Some(ref dropped) = dropped {

@@ -69,7 +69,7 @@ impl SizeUnit {
     }
 }
 
-/// Returns the string repesentation
+/// Returns the string representation
 impl std::fmt::Display for SizeUnit {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {

@@ -113,7 +113,7 @@ pub struct TrafficControlRule {
     #[serde(flatten)]
     pub limit: RateLimitConfig,
     // fixme: expose this?
-    // /// Bandwidth is shared accross all connections
+    // /// Bandwidth is shared across all connections
     // #[serde(skip_serializing_if="Option::is_none")]
     // pub shared: Option<bool>,
     /// Enable the rule at specific times

@@ -603,7 +603,7 @@ where
         let stripped_path = match realpath.strip_prefix(prefix) {
             Ok(path) => path,
             Err(_) => {
-                // outside of our tar archive, add the first occurrance to the tar
+                // outside of our tar archive, add the first occurrence to the tar
                 if let Some(path) = hardlinks.get(realpath) {
                     path
                 } else {

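The comment fixed above describes hardlink handling when a target resolves outside the archive prefix: the first occurrence is stored as a regular entry and later links point back to it. A minimal sketch of that bookkeeping (all names here are illustrative, not the actual proxmox-backup API):

    use std::collections::HashMap;
    use std::path::PathBuf;

    /// Returns the previously stored archive path if this real path was
    /// already added, otherwise records `archive_path` as its first occurrence.
    fn first_occurrence(
        hardlinks: &mut HashMap<PathBuf, PathBuf>,
        realpath: &PathBuf,
        archive_path: PathBuf,
    ) -> Option<PathBuf> {
        if let Some(first) = hardlinks.get(realpath) {
            Some(first.clone()) // later link: reference the first entry
        } else {
            hardlinks.insert(realpath.clone(), archive_path);
            None // first occurrence: caller stores the file itself
        }
    }
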
@@ -179,7 +179,7 @@ use libc::c_long;
         | FS_PROJINHERIT_FL;
 
 }
-use fs_flags::*; // for code formating/rusfmt
+use fs_flags::*; // for code formatting/rusfmt
 
 #[rustfmt::skip]
 const CHATTR_MAP: [(Flags, c_long); 10] = [

@@ -695,7 +695,7 @@ impl AclTree {
             return Ok(true);
         }
 
-        // now search trough the sub-tree
+        // now search through the sub-tree
        for (_comp, child) in node.children.iter() {
            if child.any_privs_below(auth_id, privs)? {
                return Ok(true);

@@ -195,7 +195,7 @@ impl DataStore {
     /// Open a datastore given a raw configuration.
     ///
     /// # Safety
-    /// There's no memory saftey implication, but as this is opening a new ChunkStore it will
+    /// There's no memory safety implication, but as this is opening a new ChunkStore it will
     /// create a new process locker instance, potentially on the same path as existing safely
     /// created ones. This is dangerous as dropping the reference of this and thus the underlying
     /// chunkstore's process locker will close all locks from our process on the config.path,

@@ -428,7 +428,7 @@ impl DataStore {
     /// Returns true if all the groups were removed, and false if some were protected.
     pub fn remove_namespace_groups(self: &Arc<Self>, ns: &BackupNamespace) -> Result<bool, Error> {
         // FIXME: locking? The single groups/snapshots are already protected, so may not be
-        // necesarry (depends on what we all allow to do with namespaces)
+        // necessary (depends on what we all allow to do with namespaces)
         log::info!("removing all groups in namespace {}:/{ns}", self.name());
 
         let mut removed_all_groups = true;

@@ -209,11 +209,11 @@ impl Iterator for ListNamespaces {
 /// A iterator for all Namespaces below an anchor namespace, most often that will be the
 /// `BackupNamespace::root()` one.
 ///
-/// Descends depth-first (pre-order) into the namespace hierachy yielding namespaces immediately as
+/// Descends depth-first (pre-order) into the namespace hierarchy yielding namespaces immediately as
 /// it finds them.
 ///
 /// Note: The anchor namespaces passed on creating the iterator will yielded as first element, this
-/// can be usefull for searching all backup groups from a certain anchor, as that can contain
+/// can be useful for searching all backup groups from a certain anchor, as that can contain
 /// sub-namespaces but also groups on its own level, so otherwise one would need to special case
 /// the ones from the own level.
 pub struct ListNamespacesRecursive {

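The doc comment above pins down the traversal order: the anchor namespace itself is yielded first, then the hierarchy is walked depth-first in pre-order. A toy sketch of that order (types and names are illustrative only, not the real iterator):

    struct Node {
        name: String,
        children: Vec<Node>,
    }

    /// Pre-order walk: emit the node itself before descending into its
    /// children, so the anchor is always the first element produced.
    fn preorder(node: &Node, out: &mut Vec<String>) {
        out.push(node.name.clone());
        for child in &node.children {
            preorder(child, out);
        }
    }
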
@@ -3,7 +3,7 @@ name = "pbs-tape"
 version = "0.1.0"
 authors = ["Proxmox Support Team <support@proxmox.com>"]
 edition = "2018"
-description = "LTO tage support"
+description = "LTO tape support"
 
 [dependencies]
 lazy_static = "1.4"

@@ -205,7 +205,7 @@ impl SgTape {
                 (has_format, is_worm)
             }
             Err(_) => {
-                // LTO3 and older do not supprt medium configuration mode page
+                // LTO3 and older do not support medium configuration mode page
                 (false, false)
             }
         };

@@ -259,7 +259,7 @@ impl ModeParameterHeader {
 pub struct ModeBlockDescriptor {
     pub density_code: u8,
     pub number_of_blocks: [u8; 3],
-    reserverd: u8,
+    reserved: u8,
     pub block_length: [u8; 3],
 }
 

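The `[u8; 3]` fields above are 24-bit quantities, big-endian as usual for SCSI structures. A small hypothetical helper to widen such a field (not part of the shown code):

    /// Interpret a 3-byte big-endian SCSI field as a u32 by
    /// prepending a zero high byte.
    fn be24_to_u32(b: [u8; 3]) -> u32 {
        u32::from_be_bytes([0, b[0], b[1], b[2]])
    }
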
@@ -16,9 +16,9 @@ pub mod async_lru_cache;
 /// with the allocation pattern from our/tokio's complex async machinery, resulted in very large
 /// RSS sizes due to defragmentation and long-living (smaller) allocation on top of the heap
 /// avoiding that the (big) now free'd allocations below couldn't get given back to the OS. This is
-/// not an issue with mmap'd memeory chunks, those can be given back at any time.
+/// not an issue with mmap'd memory chunks, those can be given back at any time.
 ///
-/// Lowering effective MMAP treshold to 128 KiB allows freeing up memory to the OS better and with
+/// Lowering effective MMAP threshold to 128 KiB allows freeing up memory to the OS better and with
 /// lower latency, which reduces the peak *and* average RSS size by an order of magnitude when
 /// running backup jobs. We measured a reduction by a factor of 10-20 in experiments and see much
 /// less erratic behavior in the overall's runtime RSS size.

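The doc comment above motivates lowering the malloc/mmap threshold so freed large buffers can be returned to the OS promptly. A minimal sketch of how such a threshold can be set on a glibc target via the libc crate (assumed setup code, not necessarily the exact code from this module):

    /// Ask glibc to serve allocations >= 128 KiB via mmap, so they are
    /// handed back to the OS immediately on free, keeping RSS low.
    fn lower_mmap_threshold() {
        unsafe {
            libc::mallopt(libc::M_MMAP_THRESHOLD, 128 * 1024);
        }
    }
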
@@ -1040,7 +1040,7 @@ async fn create_backup(
     }
 
     if dry_run {
-        println!("dry-run: no upload happend");
+        println!("dry-run: no upload happened");
         return Ok(Value::Null);
     }
 

@@ -153,7 +153,7 @@ async fn ensure_running(details: &SnapRestoreDetails) -> Result<VsockClient, Err
         .unwrap_or(0)
         .wrapping_add(1);
 
-    // offset cid by user id, to avoid unneccessary retries
+    // offset cid by user id, to avoid unnecessary retries
     let running_uid = nix::unistd::Uid::current();
     cid = cid.wrapping_add(running_uid.as_raw() as i32);
 

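The comment fixed above explains why the guest CID is offset by the user id: different users then start probing from different CIDs, so fewer bind attempts collide and need retrying. The arithmetic, sketched in isolation (illustrative names):

    /// Derive the first CID candidate from the last used one plus a
    /// per-user offset; wrapping keeps the value in range on overflow.
    fn initial_cid(last_cid: i32, uid: u32) -> i32 {
        last_cid.wrapping_add(1).wrapping_add(uid as i32)
    }
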
@@ -21,7 +21,7 @@ struct DummyUserInfo;
 
 impl UserInformation for DummyUserInfo {
     fn is_superuser(&self, _userid: &str) -> bool {
-        // Always return true here, so we have access to everthing
+        // Always return true here, so we have access to everything
         true
     }
     fn is_group_member(&self, _userid: &str, group: &str) -> bool {

@@ -42,7 +42,7 @@ impl ApiConfig {
     /// `api_auth` - The Authentication handler
     ///
     /// `get_index_fn` - callback to generate the root page
-    /// (index). Please note that this fuctions gets a reference to
+    /// (index). Please note that this functions gets a reference to
     /// the [ApiConfig], so it can use [Handlebars] templates
     /// ([render_template](Self::render_template) to generate pages.
     pub fn new<B: Into<PathBuf>>(

@@ -104,7 +104,7 @@ lazy_static::lazy_static! {
     static ref PSTART: u64 = PidStat::read_from_pid(Pid::from_raw(*PID)).unwrap().starttime;
 }
 
-/// Retruns the current process ID (see [libc::getpid])
+/// Returns the current process ID (see [libc::getpid])
 ///
 /// The value is cached at startup (so it is invalid after a fork)
 pub(crate) fn pid() -> i32 {

@@ -55,7 +55,7 @@ fn main() -> Result<(), Error> {
         );
     }
 
-    // don't have a real syslog (and no persistance), so use env_logger to print to a log file (via
+    // don't have a real syslog (and no persistence), so use env_logger to print to a log file (via
     // stdout to a serial terminal attached by QEMU)
     env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
         .write_style(env_logger::WriteStyle::Never)

@@ -153,7 +153,7 @@ fn accept_vsock_connections(
                 }
             }
             Err(err) => {
-                error!("error accepting vsock connetion: {}", err);
+                error!("error accepting vsock connection: {}", err);
             }
         }
     }

@@ -8,7 +8,7 @@
 //! ## Features
 //!
 //! * Well defined data format [CBOR](https://datatracker.ietf.org/doc/html/rfc8949)
-//! * Plattform independent (big endian f64, hopefully a standard format?)
+//! * Platform independent (big endian f64, hopefully a standard format?)
 //! * Arbitrary number of RRAs (dynamically changeable)
 
 use std::io::{Read, Write};

@@ -456,7 +456,7 @@ impl RRD {
     /// This selects the RRA with specified [CF] and (minimum)
     /// resolution, and extract data from `start` to `end`.
     ///
-    /// `start`: Start time. If not sepecified, we simply extract 10 data points.
+    /// `start`: Start time. If not specified, we simply extract 10 data points.
     /// `end`: End time. Default is to use the current time.
     pub fn extract_data(
         &self,

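The doc comment above says an omitted `start` yields exactly 10 data points. One plausible way such a default can be derived from the end time and the RRA resolution (a sketch, not the actual implementation):

    /// Without an explicit start, look back far enough to cover 10
    /// data points at the selected resolution.
    fn default_start(end: u64, resolution: u64) -> u64 {
        end.saturating_sub(10 * resolution)
    }
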
@@ -600,7 +600,7 @@ mod tests {
         assert_eq!(reso, 60);
         assert_eq!(data, [Some(6.5), Some(8.5), Some(10.5), Some(12.5), None]);
 
-        // add much newer vaule (should delete all previous/outdated value)
+        // add much newer value (should delete all previous/outdated value)
         let i = 100;
         rrd.update((i as f64) * 30.0, i as f64);
         println!("TEST {:?}", serde_json::to_string_pretty(&rrd));