From 669d53c0bfd3b49da531504dde970e582c27d2c7 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 7 Jul 2021 09:04:09 +0200 Subject: [PATCH 001/299] add pbs-api-types subcrate, move key_derivation move key_derivation to pbs-datastore pbs-api-types should only contain "basic" types which * are usually required by clients * don't depend on pbs-related code directly Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 12 ++++++++++++ pbs-api-types/src/lib.rs | 14 ++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 pbs-api-types/Cargo.toml create mode 100644 pbs-api-types/src/lib.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml new file mode 100644 index 00000000..0a9f7ea7 --- /dev/null +++ b/pbs-api-types/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "pbs-api-types" +version = "0.1.0" +authors = ["Proxmox Support Team "] +edition = "2018" +description = "general API type helpers for PBS" + +[dependencies] +lazy_static = "1.4" +regex = "1.2" + +proxmox = { version = "0.11.5", default-features = false, features = [ "api-macro" ] } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs new file mode 100644 index 00000000..0e3d5c39 --- /dev/null +++ b/pbs-api-types/src/lib.rs @@ -0,0 +1,14 @@ +use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; +use proxmox::const_regex; + +const_regex! 
{ + pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; +} + +pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX); + +pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = + StringSchema::new("X509 certificate fingerprint (sha256).") + .format(&FINGERPRINT_SHA256_FORMAT) + .schema(); From 4642ba673a5511ca946ec3cfed6caec3611388d0 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 7 Jul 2021 09:24:39 +0200 Subject: [PATCH 002/299] move TaskState trait to pbs-datastore Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 0e3d5c39..108c2ee2 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -1,3 +1,5 @@ +//! Basic API types used by most of the PBS code. + use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; use proxmox::const_regex; From ad4a9aea25e721d8c0c18faab1f2e6fa90177edb Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 7 Jul 2021 09:49:35 +0200 Subject: [PATCH 003/299] move id and single line comment format to pbs-api-types Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 108c2ee2..125f67c8 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -3,8 +3,26 @@ use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; use proxmox::const_regex; +#[macro_export] +macro_rules! PROXMOX_SAFE_ID_REGEX_STR { + () => { + r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" + }; +} + const_regex! { pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; + + /// Regex for safe identifiers. 
+ /// + /// This + /// [article](https://dwheeler.com/essays/fixing-unix-linux-filenames.html) + /// contains further information why it is reasonable to restict + /// names this way. This is not only useful for filenames, but for + /// any identifier command line tools work with. + pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); + + pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$"; } pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat = @@ -14,3 +32,13 @@ pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new("X509 certificate fingerprint (sha256).") .format(&FINGERPRINT_SHA256_FORMAT) .schema(); + +pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); + +pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX); + +pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .schema(); From 4e21b52bc2afa8f6947caa002b8148287bed989c Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 7 Jul 2021 09:53:48 +0200 Subject: [PATCH 004/299] move userid types to pbs-api-types Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 2 + pbs-api-types/src/lib.rs | 9 + pbs-api-types/src/userid.rs | 691 ++++++++++++++++++++++++++++++++++++ 3 files changed, 702 insertions(+) create mode 100644 pbs-api-types/src/userid.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 0a9f7ea7..9f3e9f5f 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -6,7 +6,9 @@ edition = "2018" description = "general API type helpers for PBS" [dependencies] +anyhow = "1.0" lazy_static = "1.4" regex = "1.2" +serde = { version = "1.0", features = ["derive"] } proxmox = { version = "0.11.5", default-features = false, features = [ "api-macro" ] } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 125f67c8..58f5615a 
100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -3,6 +3,15 @@ use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; use proxmox::const_regex; +#[macro_use] +mod userid; +pub use userid::Authid; +pub use userid::Userid; +pub use userid::{Realm, RealmRef}; +pub use userid::{Tokenname, TokennameRef}; +pub use userid::{Username, UsernameRef}; +pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA}; + #[macro_export] macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => { diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs new file mode 100644 index 00000000..34c7a09f --- /dev/null +++ b/pbs-api-types/src/userid.rs @@ -0,0 +1,691 @@ +//! Types for user handling. +//! +//! We have [`Username`]s, [`Realm`]s and [`Tokenname`]s. To uniquely identify a user/API token, they +//! must be combined into a [`Userid`] or [`Authid`]. +//! +//! Since they're all string types, they're organized as follows: +//! +//! * [`Username`]: an owned user name. Internally a `String`. +//! * [`UsernameRef`]: a borrowed user name. Pairs with a `Username` the same way a `str` pairs +//! with `String`, meaning you can only make references to it. +//! * [`Realm`]: an owned realm (`String` equivalent). +//! * [`RealmRef`]: a borrowed realm (`str` equivalent). +//! * [`Tokenname`]: an owned API token name (`String` equivalent) +//! * [`TokennameRef`]: a borrowed `Tokenname` (`str` equivalent). +//! * [`Userid`]: an owned user id (`"user@realm"`). +//! * [`Authid`]: an owned Authentication ID (a `Userid` with an optional `Tokenname`). +//! Note that `Userid` and `Authid` do not have a separate borrowed type. +//! +//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be +//! compared directly. If a direct comparison is really required, they can be compared as strings +//! via the `as_str()` method. [`Realm`]s, [`Userid`]s and [`Authid`]s on the other hand can be +//! 
compared with each other, as in those cases the comparison has meaning. + +use std::borrow::Borrow; +use std::convert::TryFrom; +use std::fmt; + +use anyhow::{bail, format_err, Error}; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; + +use proxmox::api::api; +use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; +use proxmox::const_regex; + +use super::PROXMOX_SAFE_ID_REGEX_STR; + +// we only allow a limited set of characters +// colon is not allowed, because we store usernames in +// colon separated lists)! +// slash is not allowed because it is used as pve API delimiter +// also see "man useradd" +#[macro_export] +macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") } +#[macro_export] +macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) } +#[macro_export] +macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) } +#[macro_export] +macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) } +#[macro_export] +macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) } + +const_regex! 
{ + pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$"); + pub PROXMOX_TOKEN_NAME_REGEX = concat!(r"^", TOKEN_NAME_REGEX_STR!(), r"$"); + pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$"); + pub PROXMOX_APITOKEN_ID_REGEX = concat!(r"^", APITOKEN_ID_REGEX_STR!(), r"$"); + pub PROXMOX_AUTH_ID_REGEX = concat!(r"^", r"(?:", USER_ID_REGEX_STR!(), r"|", APITOKEN_ID_REGEX_STR!(), r")$"); + pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$"); +} + +pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX); +pub const PROXMOX_TOKEN_NAME_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_TOKEN_NAME_REGEX); + +pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX); +pub const PROXMOX_TOKEN_ID_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_APITOKEN_ID_REGEX); +pub const PROXMOX_AUTH_ID_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_AUTH_ID_REGEX); + +pub const PROXMOX_TOKEN_ID_SCHEMA: Schema = StringSchema::new("API Token ID") + .format(&PROXMOX_TOKEN_ID_FORMAT) + .min_length(3) + .max_length(64) + .schema(); + +pub const PROXMOX_TOKEN_NAME_SCHEMA: Schema = StringSchema::new("API Token name") + .format(&PROXMOX_TOKEN_NAME_FORMAT) + .min_length(3) + .max_length(64) + .schema(); + +pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX); + +pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID") + .format(&PROXMOX_GROUP_ID_FORMAT) + .min_length(3) + .max_length(64) + .schema(); + +pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema = + StringSchema::new("Authentication domain ID") + .format(&super::PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32); +pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema(); + + +#[api( + type: String, + format: &PROXMOX_USER_NAME_FORMAT, 
+)] +/// The user name part of a user id. +/// +/// This alone does NOT uniquely identify the user and therefore does not implement `Eq`. In order +/// to compare user names directly, they need to be explicitly compared as strings by calling +/// `.as_str()`. +/// +/// ```compile_fail +/// fn test(a: Username, b: Username) -> bool { +/// a == b // illegal and does not compile +/// } +/// ``` +#[derive(Clone, Debug, Hash, Deserialize, Serialize)] +pub struct Username(String); + +/// A reference to a user name part of a user id. This alone does NOT uniquely identify the user. +/// +/// This is like a `str` to the `String` of a [`Username`]. +#[derive(Debug, Hash)] +pub struct UsernameRef(str); + +impl UsernameRef { + fn new(s: &str) -> &Self { + unsafe { &*(s as *const str as *const UsernameRef) } + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl std::ops::Deref for Username { + type Target = UsernameRef; + + fn deref(&self) -> &UsernameRef { + self.borrow() + } +} + +impl Borrow for Username { + fn borrow(&self) -> &UsernameRef { + UsernameRef::new(self.0.as_str()) + } +} + +impl AsRef for Username { + fn as_ref(&self) -> &UsernameRef { + self.borrow() + } +} + +impl ToOwned for UsernameRef { + type Owned = Username; + + fn to_owned(&self) -> Self::Owned { + Username(self.0.to_owned()) + } +} + +impl TryFrom for Username { + type Error = Error; + + fn try_from(s: String) -> Result { + if !PROXMOX_USER_NAME_REGEX.is_match(&s) { + bail!("invalid user name"); + } + + Ok(Self(s)) + } +} + +impl<'a> TryFrom<&'a str> for &'a UsernameRef { + type Error = Error; + + fn try_from(s: &'a str) -> Result<&'a UsernameRef, Error> { + if !PROXMOX_USER_NAME_REGEX.is_match(s) { + bail!("invalid name in user id"); + } + + Ok(UsernameRef::new(s)) + } +} + +#[api(schema: PROXMOX_AUTH_REALM_SCHEMA)] +/// An authentication realm. +#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)] +pub struct Realm(String); + +/// A reference to an authentication realm. 
+/// +/// This is like a `str` to the `String` of a `Realm`. +#[derive(Debug, Hash, Eq, PartialEq)] +pub struct RealmRef(str); + +impl RealmRef { + fn new(s: &str) -> &Self { + unsafe { &*(s as *const str as *const RealmRef) } + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl std::ops::Deref for Realm { + type Target = RealmRef; + + fn deref(&self) -> &RealmRef { + self.borrow() + } +} + +impl Borrow for Realm { + fn borrow(&self) -> &RealmRef { + RealmRef::new(self.0.as_str()) + } +} + +impl AsRef for Realm { + fn as_ref(&self) -> &RealmRef { + self.borrow() + } +} + +impl ToOwned for RealmRef { + type Owned = Realm; + + fn to_owned(&self) -> Self::Owned { + Realm(self.0.to_owned()) + } +} + +impl TryFrom for Realm { + type Error = Error; + + fn try_from(s: String) -> Result { + PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&s) + .map_err(|_| format_err!("invalid realm"))?; + + Ok(Self(s)) + } +} + +impl<'a> TryFrom<&'a str> for &'a RealmRef { + type Error = Error; + + fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> { + PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(s) + .map_err(|_| format_err!("invalid realm"))?; + + Ok(RealmRef::new(s)) + } +} + +impl PartialEq for Realm { + fn eq(&self, rhs: &str) -> bool { + self.0 == rhs + } +} + +impl PartialEq<&str> for Realm { + fn eq(&self, rhs: &&str) -> bool { + self.0 == *rhs + } +} + +impl PartialEq for RealmRef { + fn eq(&self, rhs: &str) -> bool { + self.0 == *rhs + } +} + +impl PartialEq<&str> for RealmRef { + fn eq(&self, rhs: &&str) -> bool { + self.0 == **rhs + } +} + +impl PartialEq for Realm { + fn eq(&self, rhs: &RealmRef) -> bool { + self.0 == rhs.0 + } +} + +impl PartialEq for RealmRef { + fn eq(&self, rhs: &Realm) -> bool { + self.0 == rhs.0 + } +} + +impl PartialEq for &RealmRef { + fn eq(&self, rhs: &Realm) -> bool { + (*self).0 == rhs.0 + } +} + +#[api( + type: String, + format: &PROXMOX_TOKEN_NAME_FORMAT, +)] +/// The token ID part of an API token authentication id. 
+/// +/// This alone does NOT uniquely identify the API token - use a full `Authid` for such use cases. +#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] +pub struct Tokenname(String); + +/// A reference to a token name part of an authentication id. This alone does NOT uniquely identify +/// the user. +/// +/// This is like a `str` to the `String` of a [`Tokenname`]. +#[derive(Debug, Hash)] +pub struct TokennameRef(str); + +#[doc(hidden)] +/// ```compile_fail +/// let a: Username = unsafe { std::mem::zeroed() }; +/// let b: Username = unsafe { std::mem::zeroed() }; +/// let _ = ::eq(&a, &b); +/// ``` +/// +/// ```compile_fail +/// let a: &UsernameRef = unsafe { std::mem::zeroed() }; +/// let b: &UsernameRef = unsafe { std::mem::zeroed() }; +/// let _ = <&UsernameRef as PartialEq>::eq(a, b); +/// ``` +/// +/// ```compile_fail +/// let a: &UsernameRef = unsafe { std::mem::zeroed() }; +/// let b: &UsernameRef = unsafe { std::mem::zeroed() }; +/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b); +/// ``` +struct _AssertNoEqImpl; + +impl TokennameRef { + fn new(s: &str) -> &Self { + unsafe { &*(s as *const str as *const TokennameRef) } + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl std::ops::Deref for Tokenname { + type Target = TokennameRef; + + fn deref(&self) -> &TokennameRef { + self.borrow() + } +} + +impl Borrow for Tokenname { + fn borrow(&self) -> &TokennameRef { + TokennameRef::new(self.0.as_str()) + } +} + +impl AsRef for Tokenname { + fn as_ref(&self) -> &TokennameRef { + self.borrow() + } +} + +impl ToOwned for TokennameRef { + type Owned = Tokenname; + + fn to_owned(&self) -> Self::Owned { + Tokenname(self.0.to_owned()) + } +} + +impl TryFrom for Tokenname { + type Error = Error; + + fn try_from(s: String) -> Result { + if !PROXMOX_TOKEN_NAME_REGEX.is_match(&s) { + bail!("invalid token name"); + } + + Ok(Self(s)) + } +} + +impl<'a> TryFrom<&'a str> for &'a TokennameRef { + type Error = Error; + + fn try_from(s: &'a str) 
-> Result<&'a TokennameRef, Error> { + if !PROXMOX_TOKEN_NAME_REGEX.is_match(s) { + bail!("invalid token name in user id"); + } + + Ok(TokennameRef::new(s)) + } +} + +/// A complete user id consisting of a user name and a realm +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Userid { + data: String, + name_len: usize, +} + +impl Userid { + pub const API_SCHEMA: Schema = StringSchema::new("User ID") + .format(&PROXMOX_USER_ID_FORMAT) + .min_length(3) + .max_length(64) + .schema(); + + const fn new(data: String, name_len: usize) -> Self { + Self { data, name_len } + } + + pub fn name(&self) -> &UsernameRef { + UsernameRef::new(&self.data[..self.name_len]) + } + + pub fn realm(&self) -> &RealmRef { + RealmRef::new(&self.data[(self.name_len + 1)..]) + } + + pub fn as_str(&self) -> &str { + &self.data + } + + /// Get the "root@pam" user id. + pub fn root_userid() -> &'static Self { + &*ROOT_USERID + } +} + +lazy_static! { + pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4); +} + +impl From for Userid { + fn from(authid: Authid) -> Self { + authid.user + } +} + +impl From<(Username, Realm)> for Userid { + fn from(parts: (Username, Realm)) -> Self { + Self::from((parts.0.as_ref(), parts.1.as_ref())) + } +} + +impl From<(&UsernameRef, &RealmRef)> for Userid { + fn from(parts: (&UsernameRef, &RealmRef)) -> Self { + let data = format!("{}@{}", parts.0.as_str(), parts.1.as_str()); + let name_len = parts.0.as_str().len(); + Self { data, name_len } + } +} + +impl fmt::Display for Userid { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.data.fmt(f) + } +} + +impl std::str::FromStr for Userid { + type Err = Error; + + fn from_str(id: &str) -> Result { + let name_len = id + .as_bytes() + .iter() + .rposition(|&b| b == b'@') + .ok_or_else(|| format_err!("not a valid user id"))?; + + let name = &id[..name_len]; + let realm = &id[(name_len + 1)..]; + + if !PROXMOX_USER_NAME_REGEX.is_match(name) { + bail!("invalid user name in user 
id"); + } + + PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm) + .map_err(|_| format_err!("invalid realm in user id"))?; + + Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm)))) + } +} + +impl TryFrom for Userid { + type Error = Error; + + fn try_from(data: String) -> Result { + let name_len = data + .as_bytes() + .iter() + .rposition(|&b| b == b'@') + .ok_or_else(|| format_err!("not a valid user id"))?; + + if !PROXMOX_USER_NAME_REGEX.is_match(&data[..name_len]) { + bail!("invalid user name in user id"); + } + + PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..]) + .map_err(|_| format_err!("invalid realm in user id"))?; + + Ok(Self { data, name_len }) + } +} + +impl PartialEq for Userid { + fn eq(&self, rhs: &str) -> bool { + self.data == *rhs + } +} + +impl PartialEq<&str> for Userid { + fn eq(&self, rhs: &&str) -> bool { + *self == **rhs + } +} + +impl PartialEq for Userid { + fn eq(&self, rhs: &String) -> bool { + self == rhs.as_str() + } +} + +/// A complete authentication id consisting of a user id and an optional token name. +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub struct Authid { + user: Userid, + tokenname: Option +} + +impl Authid { + pub const API_SCHEMA: Schema = StringSchema::new("Authentication ID") + .format(&PROXMOX_AUTH_ID_FORMAT) + .min_length(3) + .max_length(64) + .schema(); + + const fn new(user: Userid, tokenname: Option) -> Self { + Self { user, tokenname } + } + + pub fn user(&self) -> &Userid { + &self.user + } + + pub fn is_token(&self) -> bool { + self.tokenname.is_some() + } + + pub fn tokenname(&self) -> Option<&TokennameRef> { + match &self.tokenname { + Some(name) => Some(&name), + None => None, + } + } + + /// Get the "root@pam" auth id. + pub fn root_auth_id() -> &'static Self { + &*ROOT_AUTHID + } +} + +lazy_static! 
{ + pub static ref ROOT_AUTHID: Authid = Authid::from(Userid::new("root@pam".to_string(), 4)); +} + +impl From for Authid { + fn from(parts: Userid) -> Self { + Self::new(parts, None) + } +} + +impl From<(Userid, Option)> for Authid { + fn from(parts: (Userid, Option)) -> Self { + Self::new(parts.0, parts.1) + } +} + +impl fmt::Display for Authid { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match &self.tokenname { + Some(token) => write!(f, "{}!{}", self.user, token.as_str()), + None => self.user.fmt(f), + } + } +} + +impl std::str::FromStr for Authid { + type Err = Error; + + fn from_str(id: &str) -> Result { + let name_len = id + .as_bytes() + .iter() + .rposition(|&b| b == b'@') + .ok_or_else(|| format_err!("not a valid user id"))?; + + let realm_end = id + .as_bytes() + .iter() + .rposition(|&b| b == b'!') + .map(|pos| if pos < name_len { id.len() } else { pos }) + .unwrap_or_else(|| id.len()); + + if realm_end == id.len() - 1 { + bail!("empty token name in userid"); + } + + let user = Userid::from_str(&id[..realm_end])?; + + if id.len() > realm_end { + let token = Tokenname::try_from(id[(realm_end + 1)..].to_string())?; + Ok(Self::new(user, Some(token))) + } else { + Ok(Self::new(user, None)) + } + } +} + +impl TryFrom for Authid { + type Error = Error; + + fn try_from(mut data: String) -> Result { + let name_len = data + .as_bytes() + .iter() + .rposition(|&b| b == b'@') + .ok_or_else(|| format_err!("not a valid user id"))?; + + let realm_end = data + .as_bytes() + .iter() + .rposition(|&b| b == b'!') + .map(|pos| if pos < name_len { data.len() } else { pos }) + .unwrap_or_else(|| data.len()); + + if realm_end == data.len() - 1 { + bail!("empty token name in userid"); + } + + let tokenname = if data.len() > realm_end { + Some(Tokenname::try_from(data[(realm_end + 1)..].to_string())?) 
+ } else { + None + }; + + data.truncate(realm_end); + + let user:Userid = data.parse()?; + + Ok(Self { user, tokenname }) + } +} + +#[test] +fn test_token_id() { + let userid: Userid = "test@pam".parse().expect("parsing Userid failed"); + assert_eq!(userid.name().as_str(), "test"); + assert_eq!(userid.realm(), "pam"); + assert_eq!(userid, "test@pam"); + + let auth_id: Authid = "test@pam".parse().expect("parsing user Authid failed"); + assert_eq!(auth_id.to_string(), "test@pam".to_string()); + assert!(!auth_id.is_token()); + + assert_eq!(auth_id.user(), &userid); + + let user_auth_id = Authid::from(userid.clone()); + assert_eq!(user_auth_id, auth_id); + assert!(!user_auth_id.is_token()); + + let auth_id: Authid = "test@pam!bar".parse().expect("parsing token Authid failed"); + let token_userid = auth_id.user(); + assert_eq!(&userid, token_userid); + assert!(auth_id.is_token()); + assert_eq!(auth_id.tokenname().expect("Token has tokenname").as_str(), TokennameRef::new("bar").as_str()); + assert_eq!(auth_id.to_string(), "test@pam!bar".to_string()); +} + +proxmox::forward_deserialize_to_from_str!(Userid); +proxmox::forward_serialize_to_display!(Userid); + +proxmox::forward_deserialize_to_from_str!(Authid); +proxmox::forward_serialize_to_display!(Authid); From add07b08c7bde9b510a58aae4cc740092f08740f Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 7 Jul 2021 11:28:53 +0200 Subject: [PATCH 005/299] move backup id related types to pbs-api-types Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 45 +++++++++++++++++++++++++++++++------ pbs-api-types/src/userid.rs | 2 -- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 58f5615a..7775324d 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -3,6 +3,30 @@ use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; use proxmox::const_regex; +#[rustfmt::skip] +#[macro_export] +macro_rules! 
PROXMOX_SAFE_ID_REGEX_STR { () => { r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" }; } + +#[rustfmt::skip] +#[macro_export] +macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") } + +#[rustfmt::skip] +#[macro_export] +macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") } + +#[rustfmt::skip] +#[macro_export] +macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") } + +#[rustfmt::skip] +#[macro_export] +macro_rules! SNAPSHOT_PATH_REGEX_STR { + () => ( + concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")") + ); +} + #[macro_use] mod userid; pub use userid::Authid; @@ -12,14 +36,19 @@ pub use userid::{Tokenname, TokennameRef}; pub use userid::{Username, UsernameRef}; pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA}; -#[macro_export] -macro_rules! PROXMOX_SAFE_ID_REGEX_STR { - () => { - r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" - }; -} - const_regex! { + pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); + + pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); + + pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); + + pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$"); + + pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$"; + + pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); + pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; /// Regex for safe identifiers. 
@@ -51,3 +80,5 @@ pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat = pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).") .format(&SINGLE_LINE_COMMENT_FORMAT) .schema(); + +pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 34c7a09f..08335b93 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -33,8 +33,6 @@ use proxmox::api::api; use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; use proxmox::const_regex; -use super::PROXMOX_SAFE_ID_REGEX_STR; - // we only allow a limited set of characters // colon is not allowed, because we store usernames in // colon separated lists)! From 419a6ce60f78f0d47491d8d7d9d6d2b6f132615a Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 7 Jul 2021 13:47:17 +0200 Subject: [PATCH 006/299] move UPID to pbs-api-types, add UPIDExt pbs-server side related methods are added via the UPIDExt trait Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 4 ++ pbs-api-types/src/lib.rs | 2 + pbs-api-types/src/upid.rs | 143 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+) create mode 100644 pbs-api-types/src/upid.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 9f3e9f5f..cd3a7073 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -8,7 +8,11 @@ description = "general API type helpers for PBS" [dependencies] anyhow = "1.0" lazy_static = "1.4" +nix = "0.19.1" +libc = "0.2" regex = "1.2" serde = { version = "1.0", features = ["derive"] } proxmox = { version = "0.11.5", default-features = false, features = [ "api-macro" ] } + +pbs-systemd = { path = "../pbs-systemd" } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 7775324d..50072bf4 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -36,6 +36,8 @@ pub use userid::{Tokenname, 
TokennameRef}; pub use userid::{Username, UsernameRef}; pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA}; +pub mod upid; + const_regex! { pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); diff --git a/pbs-api-types/src/upid.rs b/pbs-api-types/src/upid.rs new file mode 100644 index 00000000..5666df1e --- /dev/null +++ b/pbs-api-types/src/upid.rs @@ -0,0 +1,143 @@ +use std::sync::atomic::{AtomicUsize, Ordering}; + +use anyhow::{bail, Error}; + +use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; +use proxmox::const_regex; +use proxmox::sys::linux::procfs; + +use crate::Authid; + +/// Unique Process/Task Identifier +/// +/// We use this to uniquely identify worker task. UPIDs have a short +/// string repesentaion, which gives additional information about the +/// type of the task. for example: +/// ```text +/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{userid}: +/// UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam: +/// ``` +/// Please note that we use tokio, so a single thread can run multiple +/// tasks. +// #[api] - manually implemented API type +#[derive(Debug, Clone)] +pub struct UPID { + /// The Unix PID + pub pid: libc::pid_t, + /// The Unix process start time from `/proc/pid/stat` + pub pstart: u64, + /// The task start time (Epoch) + pub starttime: i64, + /// The task ID (inside the process/thread) + pub task_id: usize, + /// Worker type (arbitrary ASCII string) + pub worker_type: String, + /// Worker ID (arbitrary ASCII string) + pub worker_id: Option, + /// The authenticated entity who started the task + pub auth_id: Authid, + /// The node name. + pub node: String, +} + +proxmox::forward_serialize_to_display!(UPID); +proxmox::forward_deserialize_to_from_str!(UPID); + +const_regex! 
{ + pub PROXMOX_UPID_REGEX = concat!( + r"^UPID:(?P[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P[0-9A-Fa-f]{8}):", + r"(?P[0-9A-Fa-f]{8,9}):(?P[0-9A-Fa-f]{8,16}):(?P[0-9A-Fa-f]{8}):", + r"(?P[^:\s]+):(?P[^:\s]*):(?P[^:\s]+):$" + ); +} + +pub const PROXMOX_UPID_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX); + +impl UPID { + pub const API_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier") + .min_length("UPID:N:12345678:12345678:12345678:::".len()) + .max_length(128) // arbitrary + .format(&PROXMOX_UPID_FORMAT) + .schema(); + + /// Create a new UPID + pub fn new( + worker_type: &str, + worker_id: Option, + auth_id: Authid, + ) -> Result { + + let pid = unsafe { libc::getpid() }; + + let bad: &[_] = &['/', ':', ' ']; + + if worker_type.contains(bad) { + bail!("illegal characters in worker type '{}'", worker_type); + } + + static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0); + + let task_id = WORKER_TASK_NEXT_ID.fetch_add(1, Ordering::SeqCst); + + Ok(UPID { + pid, + pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime, + starttime: proxmox::tools::time::epoch_i64(), + task_id, + worker_type: worker_type.to_owned(), + worker_id, + auth_id, + node: proxmox::tools::nodename().to_owned(), + }) + } +} + + +impl std::str::FromStr for UPID { + type Err = Error; + + fn from_str(s: &str) -> Result { + if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) { + + let worker_id = if cap["wid"].is_empty() { + None + } else { + let wid = pbs_systemd::unescape_unit(&cap["wid"])?; + Some(wid) + }; + + Ok(UPID { + pid: i32::from_str_radix(&cap["pid"], 16).unwrap(), + pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(), + starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(), + task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(), + worker_type: cap["wtype"].to_string(), + worker_id, + auth_id: cap["authid"].parse()?, + node: cap["node"].to_string(), + }) + } else { + bail!("unable to 
parse UPID '{}'", s); + } + + } +} + +impl std::fmt::Display for UPID { + + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + + let wid = if let Some(ref id) = self.worker_id { + pbs_systemd::escape_unit(id, false) + } else { + String::new() + }; + + // Note: pstart can be > 32bit if uptime > 497 days, so this can result in + // more that 8 characters for pstart + + write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:", + self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id) + } +} From 0d5d32a76ab83d07c43af4c11902c028bd3e5142 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 7 Jul 2021 14:37:47 +0200 Subject: [PATCH 007/299] move chunk_store to pbs-datastore Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 57 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 50072bf4..cc5103f4 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -1,5 +1,8 @@ //! Basic API types used by most of the PBS code. +use serde::{Deserialize, Serialize}; + +use proxmox::api::api; use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; use proxmox::const_regex; @@ -37,6 +40,7 @@ pub use userid::{Username, UsernameRef}; pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA}; pub mod upid; +pub use upid::UPID; const_regex! { pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); @@ -84,3 +88,56 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl .schema(); pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); + +#[api( + properties: { + "upid": { + optional: true, + type: UPID, + }, + }, +)] +#[derive(Clone, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Garbage collection status. 
+pub struct GarbageCollectionStatus { + pub upid: Option, + /// Number of processed index files. + pub index_file_count: usize, + /// Sum of bytes referred by index files. + pub index_data_bytes: u64, + /// Bytes used on disk. + pub disk_bytes: u64, + /// Chunks used on disk. + pub disk_chunks: usize, + /// Sum of removed bytes. + pub removed_bytes: u64, + /// Number of removed chunks. + pub removed_chunks: usize, + /// Sum of pending bytes (pending removal - kept for safety). + pub pending_bytes: u64, + /// Number of pending chunks (pending removal - kept for safety). + pub pending_chunks: usize, + /// Number of chunks marked as .bad by verify that have been removed by GC. + pub removed_bad: usize, + /// Number of chunks still marked as .bad after garbage collection. + pub still_bad: usize, +} + +impl Default for GarbageCollectionStatus { + fn default() -> Self { + GarbageCollectionStatus { + upid: None, + index_file_count: 0, + index_data_bytes: 0, + disk_bytes: 0, + disk_chunks: 0, + removed_bytes: 0, + removed_chunks: 0, + pending_bytes: 0, + pending_chunks: 0, + removed_bad: 0, + still_bad: 0, + } + } +} From 0b8cd2b305b6115ee93c376ba4f626c6325139a2 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 9 Jul 2021 11:31:53 +0200 Subject: [PATCH 008/299] move some api types and resolve imports in preparation of moving client & proxmox_client_tools out into a pbs-client subcrate Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 56 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index cc5103f4..8e2d1a7c 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize}; use proxmox::api::api; use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; use proxmox::const_regex; +use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; #[rustfmt::skip] #[macro_export] @@ 
-42,7 +43,37 @@ pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN pub mod upid; pub use upid::UPID; +#[rustfmt::skip] +#[macro_use] +mod local_macros { + macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") } + macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) } + macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) } + macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) } + macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") } + macro_rules! DNS_ALIAS_NAME { + () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")")) + } +} + const_regex! { + pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$"); + pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$"); + pub IP_REGEX = concat!(r"^", IPRE!(), r"$"); + pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$"); + pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$"); + pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$"); + pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$"; + pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$"); + pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$"); + pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$"); + + pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ? + + pub PASSWORD_REGEX = r"^[[:^cntrl:]]*$"; // everything but control characters + + pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$"; + pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); @@ -67,8 +98,23 @@ const_regex! 
{ pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$"; + + pub BACKUP_REPO_URL_REGEX = concat!( + r"^^(?:(?:(", + USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), + ")@)?(", + DNS_NAME!(), "|", IPRE_BRACKET!(), + "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$" + ); } +pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX); +pub const IP_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V6_REGEX); +pub const IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_REGEX); +pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_REGEX); +pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX); +pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX); + pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX); @@ -89,6 +135,9 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); +/// API schema format definition for repository URLs +pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX); + #[api( properties: { "upid": { @@ -141,3 +190,10 @@ impl Default for GarbageCollectionStatus { } } } + +pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); +pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); + +pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX); + +pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); From 1e00eae767cab188ef911c5adfc384d4a1207208 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 9 Jul 2021 14:26:42 +0200 Subject: [PATCH 009/299] move more api types for the client Signed-off-by: 
Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/crypto.rs | 57 +++++++++ pbs-api-types/src/lib.rs | 229 +++++++++++++++++++++++++++++++++++- 3 files changed, 286 insertions(+), 1 deletion(-) create mode 100644 pbs-api-types/src/crypto.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index cd3a7073..2463d69d 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -16,3 +16,4 @@ serde = { version = "1.0", features = ["derive"] } proxmox = { version = "0.11.5", default-features = false, features = [ "api-macro" ] } pbs-systemd = { path = "../pbs-systemd" } +pbs-tools = { path = "../pbs-tools" } diff --git a/pbs-api-types/src/crypto.rs b/pbs-api-types/src/crypto.rs new file mode 100644 index 00000000..7b36e85f --- /dev/null +++ b/pbs-api-types/src/crypto.rs @@ -0,0 +1,57 @@ +use std::fmt::{self, Display}; + +use anyhow::Error; +use serde::{Deserialize, Serialize}; + +use proxmox::api::api; + +use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint}; + +#[api(default: "encrypt")] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither. +pub enum CryptMode { + /// Don't encrypt. + None, + /// Encrypt. + Encrypt, + /// Only sign. + SignOnly, +} + +#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)] +#[serde(transparent)] +/// 32-byte fingerprint, usually calculated with SHA256. 
+pub struct Fingerprint { + #[serde(with = "bytes_as_fingerprint")] + bytes: [u8; 32], +} + +impl Fingerprint { + pub fn new(bytes: [u8; 32]) -> Self { + Self { bytes } + } + pub fn bytes(&self) -> &[u8; 32] { + &self.bytes + } +} + +/// Display as short key ID +impl Display for Fingerprint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", as_fingerprint(&self.bytes[0..8])) + } +} + +impl std::str::FromStr for Fingerprint { + type Err = Error; + + fn from_str(s: &str) -> Result { + let mut tmp = s.to_string(); + tmp.retain(|c| c != ':'); + let bytes = proxmox::tools::hex_to_digest(&tmp)?; + Ok(Fingerprint::new(bytes)) + } +} + diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 8e2d1a7c..2d15e92e 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; +use proxmox::api::schema::{ApiStringFormat, EnumEntry, IntegerSchema, Schema, StringSchema}; use proxmox::const_regex; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; @@ -43,6 +43,9 @@ pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN pub mod upid; pub use upid::UPID; +mod crypto; +pub use crypto::{CryptMode, Fingerprint}; + #[rustfmt::skip] #[macro_use] mod local_macros { @@ -115,6 +118,26 @@ pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_RE pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX); pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX); +pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.") + .format(&BACKUP_ID_FORMAT) + .schema(); +pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.") + .format(&ApiStringFormat::Enum(&[ + EnumEntry::new("vm", "Virtual Machine Backup"), + EnumEntry::new("ct", "Container Backup"), + 
EnumEntry::new("host", "Host Backup"), + ])) + .schema(); +pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)") + .minimum(1_547_797_308) + .schema(); + +pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX); @@ -197,3 +220,207 @@ pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA25 pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX); pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); + +pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .schema(); + +// Complex type definitions + +#[api( + properties: { + "filename": { + schema: BACKUP_ARCHIVE_NAME_SCHEMA, + }, + "crypt-mode": { + type: CryptMode, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about archive files inside a backup snapshot. +pub struct BackupContent { + pub filename: String, + /// Info if file is encrypted, signed, or neither. + #[serde(skip_serializing_if = "Option::is_none")] + pub crypt_mode: Option, + /// Archive size (from backup manifest). + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Result of a verify operation. +pub enum VerifyState { + /// Verification was successful + Ok, + /// Verification reported one or more errors + Failed, +} + +#[api( + properties: { + upid: { + type: UPID, + }, + state: { + type: VerifyState, + }, + }, +)] +#[derive(Serialize, Deserialize)] +/// Task properties. 
+pub struct SnapshotVerifyState { + /// UPID of the verify task + pub upid: UPID, + /// State of the verification. Enum. + pub state: VerifyState, +} + +#[api( + properties: { + "backup-type": { + schema: BACKUP_TYPE_SCHEMA, + }, + "backup-id": { + schema: BACKUP_ID_SCHEMA, + }, + "backup-time": { + schema: BACKUP_TIME_SCHEMA, + }, + comment: { + schema: SINGLE_LINE_COMMENT_SCHEMA, + optional: true, + }, + verification: { + type: SnapshotVerifyState, + optional: true, + }, + fingerprint: { + type: String, + optional: true, + }, + files: { + items: { + schema: BACKUP_ARCHIVE_NAME_SCHEMA + }, + }, + owner: { + type: Authid, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about backup snapshot. +pub struct SnapshotListItem { + pub backup_type: String, // enum + pub backup_id: String, + pub backup_time: i64, + /// The first line from manifest "notes" + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, + /// The result of the last run verify task + #[serde(skip_serializing_if = "Option::is_none")] + pub verification: Option, + /// Fingerprint of encryption key + #[serde(skip_serializing_if = "Option::is_none")] + pub fingerprint: Option, + /// List of contained archive files. + pub files: Vec, + /// Overall snapshot size (sum of all archive sizes). 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option, + /// The owner of the snapshots group + #[serde(skip_serializing_if = "Option::is_none")] + pub owner: Option, +} + +#[api( + properties: { + "backup-type": { + schema: BACKUP_TYPE_SCHEMA, + }, + "backup-id": { + schema: BACKUP_ID_SCHEMA, + }, + "last-backup": { + schema: BACKUP_TIME_SCHEMA, + }, + "backup-count": { + type: Integer, + }, + files: { + items: { + schema: BACKUP_ARCHIVE_NAME_SCHEMA + }, + }, + owner: { + type: Authid, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about a backup group. +pub struct GroupListItem { + pub backup_type: String, // enum + pub backup_id: String, + pub last_backup: i64, + /// Number of contained snapshots + pub backup_count: u64, + /// List of contained archive files. + pub files: Vec, + /// The owner of group + #[serde(skip_serializing_if = "Option::is_none")] + pub owner: Option, +} + +#[api( + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about a datastore. +pub struct DataStoreListItem { + pub store: String, + pub comment: Option, +} + +#[api( + properties: { + "backup-type": { + schema: BACKUP_TYPE_SCHEMA, + }, + "backup-id": { + schema: BACKUP_ID_SCHEMA, + }, + "backup-time": { + schema: BACKUP_TIME_SCHEMA, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Prune result. 
+pub struct PruneListItem { + pub backup_type: String, // enum + pub backup_id: String, + pub backup_time: i64, + /// Keep snapshot + pub keep: bool, +} From a5093db6f321fc41fa42db0a7135a59a7801fb1a Mon Sep 17 00:00:00 2001 From: Stefan Reiter Date: Thu, 8 Jul 2021 16:45:27 +0200 Subject: [PATCH 010/299] api: add support for notes on backup groups Stored in atomically-updated 'notes' file in backup group directory. Available via dedicated GET/PUT API calls, as well as the first line being included in list_groups (similar to list_snapshots). Signed-off-by: Stefan Reiter Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 2d15e92e..f9fe5cfb 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -380,6 +380,9 @@ pub struct GroupListItem { /// The owner of group #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, + /// The first line from group "notes" + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, } #[api( From a25f4f3b3694d8d7f8f783f2a7bd388f2ed5f76d Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Fri, 16 Jul 2021 10:53:18 +0200 Subject: [PATCH 011/299] api-types: move PRUNE_SCHEMA_KEEP_* to pbs-api-types Signed-off-by: Dominik Csapak --- pbs-api-types/src/lib.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index f9fe5cfb..c07699d1 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -146,6 +146,36 @@ pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = .format(&FINGERPRINT_SHA256_FORMAT) .schema(); +pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new( + "Number of daily backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = IntegerSchema::new( + "Number of hourly backups to keep.") + .minimum(1) + .schema(); + +pub const 
PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new( + "Number of backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = IntegerSchema::new( + "Number of monthly backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = IntegerSchema::new( + "Number of weekly backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new( + "Number of yearly backups to keep.") + .minimum(1) + .schema(); + pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); From 1081dc8d59aa768412ebf40eb5f5bcf911ae56ae Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Mon, 19 Jul 2021 10:50:18 +0200 Subject: [PATCH 012/299] move client to pbs-client subcrate Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 57 ++++++---- pbs-api-types/src/user.rs | 205 ++++++++++++++++++++++++++++++++++++ pbs-api-types/src/userid.rs | 1 - 3 files changed, 240 insertions(+), 23 deletions(-) create mode 100644 pbs-api-types/src/user.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index c07699d1..00d1e3a5 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -40,6 +40,13 @@ pub use userid::{Tokenname, TokennameRef}; pub use userid::{Username, UsernameRef}; pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA}; +#[macro_use] +mod user; +pub use user::{ApiToken, User, UserWithTokens}; +pub use user::{ + EMAIL_SCHEMA, ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA, +}; + pub mod upid; pub use upid::UPID; @@ -146,35 +153,33 @@ pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = .format(&FINGERPRINT_SHA256_FORMAT) .schema(); -pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new( - "Number of daily backups to keep.") +pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.") 
.minimum(1) .schema(); -pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = IntegerSchema::new( - "Number of hourly backups to keep.") +pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = + IntegerSchema::new("Number of hourly backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.") .minimum(1) .schema(); -pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new( - "Number of backups to keep.") - .minimum(1) - .schema(); +pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = + IntegerSchema::new("Number of monthly backups to keep.") + .minimum(1) + .schema(); -pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = IntegerSchema::new( - "Number of monthly backups to keep.") - .minimum(1) - .schema(); +pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = + IntegerSchema::new("Number of weekly backups to keep.") + .minimum(1) + .schema(); -pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = IntegerSchema::new( - "Number of weekly backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new( - "Number of yearly backups to keep.") - .minimum(1) - .schema(); +pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = + IntegerSchema::new("Number of yearly backups to keep.") + .minimum(1) + .schema(); pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); @@ -186,6 +191,14 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl .format(&SINGLE_LINE_COMMENT_FORMAT) .schema(); +pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( + "Prevent changes if current configuration file has different \ + SHA256 digest. 
This can be used to prevent concurrent \ + modifications.", +) +.format(&PVE_CONFIG_DIGEST_FORMAT) +.schema(); + pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); /// API schema format definition for repository URLs @@ -411,7 +424,7 @@ pub struct GroupListItem { #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, /// The first line from group "notes" - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, } diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs new file mode 100644 index 00000000..9111ccea --- /dev/null +++ b/pbs-api-types/src/user.rs @@ -0,0 +1,205 @@ +use serde::{Deserialize, Serialize}; + +use proxmox::api::api; +use proxmox::api::schema::{BooleanSchema, IntegerSchema, Schema, StringSchema}; + +use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA}; +use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA}; + +pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new( + "Enable the account (default). You can set this to '0' to disable the account.") + .default(true) + .schema(); + +pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new( + "Account expiration date (seconds since epoch). 
'0' means no expiration date.") + .default(0) + .minimum(0) + .schema(); + +pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .min_length(2) + .max_length(64) + .schema(); + +pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .min_length(2) + .max_length(64) + .schema(); + +pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .min_length(2) + .max_length(64) + .schema(); + +#[api( + properties: { + userid: { + type: Userid, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + enable: { + optional: true, + schema: ENABLE_USER_SCHEMA, + }, + expire: { + optional: true, + schema: EXPIRE_USER_SCHEMA, + }, + firstname: { + optional: true, + schema: FIRST_NAME_SCHEMA, + }, + lastname: { + schema: LAST_NAME_SCHEMA, + optional: true, + }, + email: { + schema: EMAIL_SCHEMA, + optional: true, + }, + tokens: { + type: Array, + optional: true, + description: "List of user's API tokens.", + items: { + type: ApiToken + }, + }, + } +)] +#[derive(Serialize,Deserialize)] +/// User properties with added list of ApiTokens +pub struct UserWithTokens { + pub userid: Userid, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub enable: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub expire: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub firstname: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub lastname: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub email: Option, + #[serde(skip_serializing_if="Vec::is_empty", default)] + pub tokens: Vec, +} + +#[api( + properties: { + tokenid: { + schema: PROXMOX_TOKEN_ID_SCHEMA, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + enable: { + optional: true, + schema: 
ENABLE_USER_SCHEMA, + }, + expire: { + optional: true, + schema: EXPIRE_USER_SCHEMA, + }, + } +)] +#[derive(Serialize,Deserialize)] +/// ApiToken properties. +pub struct ApiToken { + pub tokenid: Authid, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub enable: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub expire: Option, +} + +impl ApiToken { + pub fn is_active(&self) -> bool { + if !self.enable.unwrap_or(true) { + return false; + } + if let Some(expire) = self.expire { + let now = proxmox::tools::time::epoch_i64(); + if expire > 0 && expire <= now { + return false; + } + } + true + } +} + +#[api( + properties: { + userid: { + type: Userid, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + enable: { + optional: true, + schema: ENABLE_USER_SCHEMA, + }, + expire: { + optional: true, + schema: EXPIRE_USER_SCHEMA, + }, + firstname: { + optional: true, + schema: FIRST_NAME_SCHEMA, + }, + lastname: { + schema: LAST_NAME_SCHEMA, + optional: true, + }, + email: { + schema: EMAIL_SCHEMA, + optional: true, + }, + } +)] +#[derive(Serialize,Deserialize)] +/// User properties. 
+pub struct User { + pub userid: Userid, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub enable: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub expire: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub firstname: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub lastname: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub email: Option, +} + +impl User { + pub fn is_active(&self) -> bool { + if !self.enable.unwrap_or(true) { + return false; + } + if let Some(expire) = self.expire { + let now = proxmox::tools::time::epoch_i64(); + if expire > 0 && expire <= now { + return false; + } + } + true + } +} diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 08335b93..e931181e 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -98,7 +98,6 @@ pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema = .max_length(32); pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema(); - #[api( type: String, format: &PROXMOX_USER_NAME_FORMAT, From 711535cdeed32e845363fd163ba5c537a1efedb3 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Mon, 19 Jul 2021 14:59:51 +0200 Subject: [PATCH 013/299] move some api types to pbs-api-types and resolve some imports in the client binary Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 55 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 00d1e3a5..a95cbf6a 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -470,3 +470,58 @@ pub struct PruneListItem { /// Keep snapshot pub keep: bool, } + +#[api()] +#[derive(Default, Serialize, Deserialize)] +/// Storage space usage information. +pub struct StorageStatus { + /// Total space (bytes). + pub total: u64, + /// Used space (bytes). 
+ pub used: u64, + /// Available space (bytes). + pub avail: u64, +} + +#[api()] +#[derive(Serialize, Deserialize, Default)] +/// Backup Type group/snapshot counts. +pub struct TypeCounts { + /// The number of groups of the type. + pub groups: u64, + /// The number of snapshots of the type. + pub snapshots: u64, +} + +#[api( + properties: { + ct: { + type: TypeCounts, + optional: true, + }, + host: { + type: TypeCounts, + optional: true, + }, + vm: { + type: TypeCounts, + optional: true, + }, + other: { + type: TypeCounts, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize, Default)] +/// Counts of groups/snapshots per BackupType. +pub struct Counts { + /// The counts for CT backups + pub ct: Option, + /// The counts for Host backups + pub host: Option, + /// The counts for VM backups + pub vm: Option, + /// The counts for other backup types + pub other: Option, +} From e7ff5817ec5433b332a1aab65885fcb44f82b2f0 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 20 Jul 2021 13:51:55 +0200 Subject: [PATCH 014/299] add helpers to write configuration files --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 2463d69d..564a2101 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -13,7 +13,7 @@ libc = "0.2" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = { version = "0.11.5", default-features = false, features = [ "api-macro" ] } +proxmox = { version = "0.12.0", default-features = false, features = [ "api-macro" ] } pbs-systemd = { path = "../pbs-systemd" } pbs-tools = { path = "../pbs-tools" } From 7bddd33ede155f5f2a6127562cafad0597f49b77 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 21 Jul 2021 14:12:22 +0200 Subject: [PATCH 015/299] move remaining client tools to pbs-tools/datastore pbs-datastore now ended up depending on tokio after all, but that's fine for now for the fuse code I added 
pbs-fuse-loop (has the old fuse_loop and its 'loopdev' module) ultimately only binaries should depend on this to avoid the library link the only thins remaining to move out the client binary are the api method return types, those will need to be moved to pbs-api-types... Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 3 ++- pbs-api-types/src/lib.rs | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 564a2101..c8372ba4 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -8,8 +8,9 @@ description = "general API type helpers for PBS" [dependencies] anyhow = "1.0" lazy_static = "1.4" -nix = "0.19.1" libc = "0.2" +nix = "0.19.1" +openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index a95cbf6a..576099eb 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -525,3 +525,42 @@ pub struct Counts { /// The counts for other backup types pub other: Option, } + +pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .min_length(1) + .max_length(64) + .schema(); + + +#[api] +#[derive(Deserialize, Serialize)] +/// RSA public key information +pub struct RsaPubKeyInfo { + /// Path to key (if stored in a file) + #[serde(skip_serializing_if="Option::is_none")] + pub path: Option, + /// RSA exponent + pub exponent: String, + /// Hex-encoded RSA modulus + pub modulus: String, + /// Key (modulus) length in bits + pub length: usize, +} + +impl std::convert::TryFrom> for RsaPubKeyInfo { + type Error = anyhow::Error; + + fn try_from(value: openssl::rsa::Rsa) -> Result { + let modulus = value.n().to_hex_str()?.to_string(); + let exponent = value.e().to_dec_str()?.to_string(); + let length = value.size() as usize * 8; + + Ok(Self { + path: None, + exponent, + modulus, + 
length, + }) + } +} From 0b1ecc260af3afc71fde8c674025f8031db102e2 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 22 Jul 2021 09:58:31 +0200 Subject: [PATCH 016/299] cargo: update proxmox to 0.12.1 For the FS compat improvement in the atomic create file helper Signed-off-by: Thomas Lamprecht --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index c8372ba4..4eb0574d 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = { version = "0.12.0", default-features = false, features = [ "api-macro" ] } +proxmox = { version = "0.12.1", default-features = false, features = [ "api-macro" ] } pbs-systemd = { path = "../pbs-systemd" } pbs-tools = { path = "../pbs-tools" } From d0103000b8fb3dec040cdc952ec082bd45f8d6e4 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 12 Aug 2021 09:30:41 +0200 Subject: [PATCH 017/299] use new api updater features Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/userid.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index e931181e..8418f13e 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -30,7 +30,7 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; +use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema, Updatable}; use proxmox::const_regex; // we only allow a limited set of characters @@ -403,6 +403,12 @@ pub struct Userid { name_len: usize, } +impl Updatable for Userid { + type Updater = Option; + + const UPDATER_IS_OPTION: bool = true; +} + impl Userid { pub const API_SCHEMA: Schema = StringSchema::new("User ID") .format(&PROXMOX_USER_ID_FORMAT) From 
35e7f2f48e199436d3ffd8836bcd3236b423dd2f Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 25 Aug 2021 09:37:54 +0200 Subject: [PATCH 018/299] use ApiType trait Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/upid.rs | 8 +++++--- pbs-api-types/src/userid.rs | 21 ++++++++++++++++----- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/pbs-api-types/src/upid.rs b/pbs-api-types/src/upid.rs index 5666df1e..9447b8a0 100644 --- a/pbs-api-types/src/upid.rs +++ b/pbs-api-types/src/upid.rs @@ -2,7 +2,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use anyhow::{bail, Error}; -use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema}; +use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema}; use proxmox::const_regex; use proxmox::sys::linux::procfs; @@ -54,13 +54,15 @@ const_regex! { pub const PROXMOX_UPID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX); -impl UPID { - pub const API_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier") +impl ApiType for UPID { + const API_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier") .min_length("UPID:N:12345678:12345678:12345678:::".len()) .max_length(128) // arbitrary .format(&PROXMOX_UPID_FORMAT) .schema(); +} +impl UPID { /// Create a new UPID pub fn new( worker_type: &str, diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 8418f13e..5ecae4df 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -30,7 +30,7 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema, Updatable}; +use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, Updatable}; use proxmox::const_regex; // we only allow a limited set of characters @@ -409,13 +409,15 @@ impl Updatable for Userid { const UPDATER_IS_OPTION: bool = true; } -impl Userid { - pub const API_SCHEMA: Schema = 
StringSchema::new("User ID") +impl ApiType for Userid { + const API_SCHEMA: Schema = StringSchema::new("User ID") .format(&PROXMOX_USER_ID_FORMAT) .min_length(3) .max_length(64) .schema(); +} +impl Userid { const fn new(data: String, name_len: usize) -> Self { Self { data, name_len } } @@ -538,13 +540,22 @@ pub struct Authid { tokenname: Option } -impl Authid { - pub const API_SCHEMA: Schema = StringSchema::new("Authentication ID") + +impl Updatable for Authid { + type Updater = Option; + + const UPDATER_IS_OPTION: bool = true; +} + +impl ApiType for Authid { + const API_SCHEMA: Schema = StringSchema::new("Authentication ID") .format(&PROXMOX_AUTH_ID_FORMAT) .min_length(3) .max_length(64) .schema(); +} +impl Authid { const fn new(user: Userid, tokenname: Option) -> Self { Self { user, tokenname } } From 6970858aaddcad0282eec832ffb8f9e49c34a77a Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 24 Aug 2021 15:32:27 +0200 Subject: [PATCH 019/299] bump proxmox dependency to 0.13.0 and with it: * bump proxmox-http dependency to 0.4.0 * bump proxmox-apt dependency to 0.7.0 Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 4eb0574d..15507328 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = { version = "0.12.1", default-features = false, features = [ "api-macro" ] } +proxmox = { version = "0.13.0", default-features = false, features = [ "api-macro" ] } pbs-systemd = { path = "../pbs-systemd" } pbs-tools = { path = "../pbs-tools" } From 12312bcb36ca1c1b234473e53cef0bddaa17464a Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 24 Aug 2021 16:12:58 +0200 Subject: [PATCH 020/299] more Updatable -> UpdaterType fixups Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/userid.rs | 19 
+++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 5ecae4df..1794c720 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -30,7 +30,7 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, Updatable}; +use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType}; use proxmox::const_regex; // we only allow a limited set of characters @@ -397,18 +397,12 @@ impl<'a> TryFrom<&'a str> for &'a TokennameRef { } /// A complete user id consisting of a user name and a realm -#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, UpdaterType)] pub struct Userid { data: String, name_len: usize, } -impl Updatable for Userid { - type Updater = Option; - - const UPDATER_IS_OPTION: bool = true; -} - impl ApiType for Userid { const API_SCHEMA: Schema = StringSchema::new("User ID") .format(&PROXMOX_USER_ID_FORMAT) @@ -534,19 +528,12 @@ impl PartialEq for Userid { } /// A complete authentication id consisting of a user id and an optional token name. 
-#[derive(Clone, Debug, Eq, PartialEq, Hash)] +#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType)] pub struct Authid { user: Userid, tokenname: Option } - -impl Updatable for Authid { - type Updater = Option; - - const UPDATER_IS_OPTION: bool = true; -} - impl ApiType for Authid { const API_SCHEMA: Schema = StringSchema::new("Authentication ID") .format(&PROXMOX_AUTH_ID_FORMAT) From 6dc073fa0fae1ce9f04c4b3621d34014cbfd9a9f Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 26 Aug 2021 13:17:55 +0200 Subject: [PATCH 021/299] move some API return types to pbs-api-types they'll be required by the api client Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 76 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 75 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 576099eb..1c9063bf 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -3,7 +3,10 @@ use serde::{Deserialize, Serialize}; use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, EnumEntry, IntegerSchema, Schema, StringSchema}; +use proxmox::api::schema::{ + ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema, + StringSchema, +}; use proxmox::const_regex; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; @@ -564,3 +567,74 @@ impl std::convert::TryFrom> for RsaPubK }) } } + +#[api( + properties: { + upid: { schema: UPID::API_SCHEMA }, + }, +)] +#[derive(Serialize, Deserialize)] +/// Task properties. +pub struct TaskListItem { + pub upid: String, + /// The node name where the task is running on. 
+ pub node: String, + /// The Unix PID + pub pid: i64, + /// The task start time (Epoch) + pub pstart: u64, + /// The task start time (Epoch) + pub starttime: i64, + /// Worker type (arbitrary ASCII string) + pub worker_type: String, + /// Worker ID (arbitrary ASCII string) + pub worker_id: Option, + /// The authenticated entity who started the task + pub user: Authid, + /// The task end time (Epoch) + #[serde(skip_serializing_if="Option::is_none")] + pub endtime: Option, + /// Task end status + #[serde(skip_serializing_if="Option::is_none")] + pub status: Option, +} + +pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of snapshots.", + &SnapshotListItem::API_SCHEMA, + ).schema(), +}; + +pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of archive files inside a backup snapshots.", + &BackupContent::API_SCHEMA, + ).schema(), +}; + +pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of backup groups.", + &GroupListItem::API_SCHEMA, + ).schema(), +}; + +pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of snapshots and a flag indicating if there are kept or removed.", + &PruneListItem::API_SCHEMA, + ).schema(), +}; + +pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "A list of tasks.", + &TaskListItem::API_SCHEMA, + ).schema(), +}; From 199227bd01a64d2a12328832d771dc2344591da8 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 31 Aug 2021 10:45:32 +0200 Subject: [PATCH 022/299] move some more API types ArchiveEntry -> pbs-datastore RestoreDaemonStatus -> pbs-api-types Signed-off-by: Wolfgang Bumiller --- 
pbs-api-types/src/file_restore.rs | 15 +++++++++++++++ pbs-api-types/src/lib.rs | 2 ++ 2 files changed, 17 insertions(+) create mode 100644 pbs-api-types/src/file_restore.rs diff --git a/pbs-api-types/src/file_restore.rs b/pbs-api-types/src/file_restore.rs new file mode 100644 index 00000000..eedb172b --- /dev/null +++ b/pbs-api-types/src/file_restore.rs @@ -0,0 +1,15 @@ +use serde::{Deserialize, Serialize}; + +use proxmox::api::api; + +#[api] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// General status information about a running VM file-restore daemon +pub struct RestoreDaemonStatus { + /// VM uptime in seconds + pub uptime: i64, + /// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is + /// not set, as then the status call will have reset the timer before returning the value + pub timeout: i64, +} diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 1c9063bf..024181ee 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -56,6 +56,8 @@ pub use upid::UPID; mod crypto; pub use crypto::{CryptMode, Fingerprint}; +pub mod file_restore; + #[rustfmt::skip] #[macro_use] mod local_macros { From 401cf57883b94a938c742fc6a756ad5d3241a468 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 1 Sep 2021 14:37:11 +0200 Subject: [PATCH 023/299] another import cleanup Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 024181ee..14c8cd35 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -121,6 +121,8 @@ const_regex! 
{ DNS_NAME!(), "|", IPRE_BRACKET!(), "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$" ); + + pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$"; } pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX); From 3fc017a5700db13e56c2ab03039bf8514bd9d54b Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 2 Sep 2021 12:47:11 +0200 Subject: [PATCH 024/299] start new pbs-config workspace moved src/config/domains.rs --- pbs-api-types/src/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 14c8cd35..aa0dd9a1 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -152,6 +152,12 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") .max_length(32) .schema(); +pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(2) + .max_length(32) + .schema(); + pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX); From 6b977533d6d59c2a7235576ad1898ae14ab837ad Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 2 Sep 2021 14:25:15 +0200 Subject: [PATCH 025/299] move remote config into pbs-config workspace --- pbs-api-types/src/lib.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index aa0dd9a1..0aa9374c 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -58,6 +58,9 @@ pub use crypto::{CryptMode, Fingerprint}; pub mod file_restore; +mod remote; +pub use remote::*; + #[rustfmt::skip] #[macro_use] mod local_macros { @@ -132,6 +135,16 @@ pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_RE pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX); pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX); +pub const DNS_NAME_FORMAT: 
ApiStringFormat = + ApiStringFormat::Pattern(&DNS_NAME_REGEX); + +pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX); + +pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.") + .format(&DNS_NAME_OR_IP_FORMAT) + .schema(); + pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.") .format(&BACKUP_ID_FORMAT) .schema(); From 1c30b9da92775839664e8aeb11c81320a1543463 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 2 Sep 2021 17:36:13 +0200 Subject: [PATCH 026/299] add missing file pbs-api-types/src/remote.rs --- pbs-api-types/src/remote.rs | 86 +++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 pbs-api-types/src/remote.rs diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs new file mode 100644 index 00000000..2784e353 --- /dev/null +++ b/pbs-api-types/src/remote.rs @@ -0,0 +1,86 @@ +use serde::{Deserialize, Serialize}; + +use super::*; +use proxmox::api::{api, schema::*}; + +pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.") + .format(&PASSWORD_FORMAT) + .min_length(1) + .max_length(1024) + .schema(); + +pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host (stored as base64 string).") + .format(&PASSWORD_FORMAT) + .min_length(1) + .max_length(1024) + .schema(); + +pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + + +#[api( + properties: { + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + host: { + schema: DNS_NAME_OR_IP_SCHEMA, + }, + port: { + optional: true, + description: "The (optional) port", + type: u16, + }, + "auth-id": { + type: Authid, + }, + fingerprint: { + optional: true, + schema: CERT_FINGERPRINT_SHA256_SCHEMA, + }, + }, +)] 
+#[derive(Serialize,Deserialize,Updater)] +#[serde(rename_all = "kebab-case")] +/// Remote configuration properties. +pub struct RemoteConfig { + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + pub host: String, + #[serde(skip_serializing_if="Option::is_none")] + pub port: Option, + pub auth_id: Authid, + #[serde(skip_serializing_if="Option::is_none")] + pub fingerprint: Option, +} + +#[api( + properties: { + name: { + schema: REMOTE_ID_SCHEMA, + }, + config: { + type: RemoteConfig, + }, + password: { + schema: REMOTE_PASSWORD_BASE64_SCHEMA, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Remote properties. +pub struct Remote { + pub name: String, + // Note: The stored password is base64 encoded + #[serde(skip_serializing_if="String::is_empty")] + #[serde(with = "proxmox::tools::serde::string_as_base64")] + pub password: String, + #[serde(flatten)] + pub config: RemoteConfig, +} From 28e668ddf3a86d85f7613ab2a17cdd64168281ee Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 3 Sep 2021 09:10:18 +0200 Subject: [PATCH 027/299] move drive config to pbs_config workspace Also moved the tape type definitions to pbs_api_types. 
--- pbs-api-types/src/lib.rs | 3 + pbs-api-types/src/tape/changer.rs | 135 +++++++++++ pbs-api-types/src/tape/device.rs | 55 +++++ pbs-api-types/src/tape/drive.rs | 285 +++++++++++++++++++++++ pbs-api-types/src/tape/media.rs | 182 +++++++++++++++ pbs-api-types/src/tape/media_location.rs | 91 ++++++++ pbs-api-types/src/tape/media_pool.rs | 161 +++++++++++++ pbs-api-types/src/tape/media_status.rs | 21 ++ pbs-api-types/src/tape/mod.rs | 94 ++++++++ 9 files changed, 1027 insertions(+) create mode 100644 pbs-api-types/src/tape/changer.rs create mode 100644 pbs-api-types/src/tape/device.rs create mode 100644 pbs-api-types/src/tape/drive.rs create mode 100644 pbs-api-types/src/tape/media.rs create mode 100644 pbs-api-types/src/tape/media_location.rs create mode 100644 pbs-api-types/src/tape/media_pool.rs create mode 100644 pbs-api-types/src/tape/media_status.rs create mode 100644 pbs-api-types/src/tape/mod.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 0aa9374c..427b2d9f 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -61,6 +61,9 @@ pub mod file_restore; mod remote; pub use remote::*; +mod tape; +pub use tape::*; + #[rustfmt::skip] #[macro_use] mod local_macros { diff --git a/pbs-api-types/src/tape/changer.rs b/pbs-api-types/src/tape/changer.rs new file mode 100644 index 00000000..24f529df --- /dev/null +++ b/pbs-api-types/src/tape/changer.rs @@ -0,0 +1,135 @@ +//! Types for tape changer API + +use serde::{Deserialize, Serialize}; + +use proxmox::api::{ + api, + schema::{ + Schema, + ApiStringFormat, + ArraySchema, + IntegerSchema, + StringSchema, + }, +}; + +use crate::{ + PROXMOX_SAFE_ID_FORMAT, + OptionalDeviceIdentification, +}; + +pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + +pub const SCSI_CHANGER_PATH_SCHEMA: Schema = StringSchema::new( + "Path to Linux generic SCSI device (e.g. 
'/dev/sg4')") + .schema(); + +pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(2) + .max_length(32) + .schema(); + +pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new( + "Slot list.", &IntegerSchema::new("Slot number") + .minimum(1) + .schema()) + .schema(); + +pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new("\ +A list of slot numbers, comma separated. Those slots are reserved for +Import/Export, i.e. any media in those slots are considered to be +'offline'. +") +.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA)) +.schema(); + +#[api( + properties: { + name: { + schema: CHANGER_NAME_SCHEMA, + }, + path: { + schema: SCSI_CHANGER_PATH_SCHEMA, + }, + "export-slots": { + schema: EXPORT_SLOT_LIST_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// SCSI tape changer +pub struct ScsiTapeChanger { + pub name: String, + pub path: String, + #[serde(skip_serializing_if="Option::is_none")] + pub export_slots: Option, +} + +#[api( + properties: { + config: { + type: ScsiTapeChanger, + }, + info: { + type: OptionalDeviceIdentification, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Changer config with optional device identification attributes +pub struct ChangerListEntry { + #[serde(flatten)] + pub config: ScsiTapeChanger, + #[serde(flatten)] + pub info: OptionalDeviceIdentification, +} + +#[api()] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Mtx Entry Kind +pub enum MtxEntryKind { + /// Drive + Drive, + /// Slot + Slot, + /// Import/Export Slot + ImportExport, +} + +#[api( + properties: { + "entry-kind": { + type: MtxEntryKind, + }, + "label-text": { + schema: MEDIA_LABEL_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Mtx Status Entry +pub struct MtxStatusEntry { + 
pub entry_kind: MtxEntryKind, + /// The ID of the slot or drive + pub entry_id: u64, + /// The media label (volume tag) if the slot/drive is full + #[serde(skip_serializing_if="Option::is_none")] + pub label_text: Option, + /// The slot the drive was loaded from + #[serde(skip_serializing_if="Option::is_none")] + pub loaded_slot: Option, + /// The current state of the drive + #[serde(skip_serializing_if="Option::is_none")] + pub state: Option, +} diff --git a/pbs-api-types/src/tape/device.rs b/pbs-api-types/src/tape/device.rs new file mode 100644 index 00000000..368a0015 --- /dev/null +++ b/pbs-api-types/src/tape/device.rs @@ -0,0 +1,55 @@ +use ::serde::{Deserialize, Serialize}; + +use proxmox::api::api; + +#[api()] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Optional Device Identification Attributes +pub struct OptionalDeviceIdentification { + /// Vendor (autodetected) + #[serde(skip_serializing_if="Option::is_none")] + pub vendor: Option, + /// Model (autodetected) + #[serde(skip_serializing_if="Option::is_none")] + pub model: Option, + /// Serial number (autodetected) + #[serde(skip_serializing_if="Option::is_none")] + pub serial: Option, +} + +#[api()] +#[derive(Debug,Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Kind of device +pub enum DeviceKind { + /// Tape changer (Autoloader, Robot) + Changer, + /// Normal SCSI tape device + Tape, +} + +#[api( + properties: { + kind: { + type: DeviceKind, + }, + }, +)] +#[derive(Debug,Serialize,Deserialize)] +/// Tape device information +pub struct TapeDeviceInfo { + pub kind: DeviceKind, + /// Path to the linux device node + pub path: String, + /// Serial number (autodetected) + pub serial: String, + /// Vendor (autodetected) + pub vendor: String, + /// Model (autodetected) + pub model: String, + /// Device major number + pub major: u32, + /// Device minor number + pub minor: u32, +} diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs new file 
mode 100644 index 00000000..3e207a99 --- /dev/null +++ b/pbs-api-types/src/tape/drive.rs @@ -0,0 +1,285 @@ +//! Types for tape drive API +use std::convert::TryFrom; + +use anyhow::{bail, Error}; +use serde::{Deserialize, Serialize}; + +use proxmox::api::{ + api, + schema::{Schema, IntegerSchema, StringSchema, Updater}, +}; + +use crate::{ + PROXMOX_SAFE_ID_FORMAT, + CHANGER_NAME_SCHEMA, + OptionalDeviceIdentification, +}; + +pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + +pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new( + "The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')") + .schema(); + +pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new( + "Associated changer drive number (requires option changer)") + .minimum(0) + .maximum(255) + .default(0) + .schema(); + +#[api( + properties: { + name: { + schema: DRIVE_NAME_SCHEMA, + } + } +)] +#[derive(Serialize,Deserialize)] +/// Simulate tape drives (only for test and debug) +#[serde(rename_all = "kebab-case")] +pub struct VirtualTapeDrive { + pub name: String, + /// Path to directory + pub path: String, + /// Virtual tape size + #[serde(skip_serializing_if="Option::is_none")] + pub max_size: Option, +} + +#[api( + properties: { + name: { + schema: DRIVE_NAME_SCHEMA, + }, + path: { + schema: LTO_DRIVE_PATH_SCHEMA, + }, + changer: { + schema: CHANGER_NAME_SCHEMA, + optional: true, + }, + "changer-drivenum": { + schema: CHANGER_DRIVENUM_SCHEMA, + optional: true, + }, + } +)] +#[derive(Serialize,Deserialize,Updater)] +#[serde(rename_all = "kebab-case")] +/// Lto SCSI tape driver +pub struct LtoTapeDrive { + #[updater(skip)] + pub name: String, + pub path: String, + #[serde(skip_serializing_if="Option::is_none")] + pub changer: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub changer_drivenum: Option, +} + +#[api( + properties: { + config: { + type: LtoTapeDrive, 
+ }, + info: { + type: OptionalDeviceIdentification, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Drive list entry +pub struct DriveListEntry { + #[serde(flatten)] + pub config: LtoTapeDrive, + #[serde(flatten)] + pub info: OptionalDeviceIdentification, + /// the state of the drive if locked + #[serde(skip_serializing_if="Option::is_none")] + pub state: Option, +} + +#[api()] +#[derive(Serialize,Deserialize)] +/// Medium auxiliary memory attributes (MAM) +pub struct MamAttribute { + /// Attribute id + pub id: u16, + /// Attribute name + pub name: String, + /// Attribute value + pub value: String, +} + +#[api()] +#[derive(Serialize,Deserialize,Copy,Clone,Debug)] +pub enum TapeDensity { + /// Unknown (no media loaded) + Unknown, + /// LTO1 + LTO1, + /// LTO2 + LTO2, + /// LTO3 + LTO3, + /// LTO4 + LTO4, + /// LTO5 + LTO5, + /// LTO6 + LTO6, + /// LTO7 + LTO7, + /// LTO7M8 + LTO7M8, + /// LTO8 + LTO8, +} + +impl TryFrom for TapeDensity { + type Error = Error; + + fn try_from(value: u8) -> Result { + let density = match value { + 0x00 => TapeDensity::Unknown, + 0x40 => TapeDensity::LTO1, + 0x42 => TapeDensity::LTO2, + 0x44 => TapeDensity::LTO3, + 0x46 => TapeDensity::LTO4, + 0x58 => TapeDensity::LTO5, + 0x5a => TapeDensity::LTO6, + 0x5c => TapeDensity::LTO7, + 0x5d => TapeDensity::LTO7M8, + 0x5e => TapeDensity::LTO8, + _ => bail!("unknown tape density code 0x{:02x}", value), + }; + Ok(density) + } +} + +#[api( + properties: { + density: { + type: TapeDensity, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Drive/Media status for Lto SCSI drives. +/// +/// Media related data is optional - only set if there is a medium +/// loaded. 
+pub struct LtoDriveAndMediaStatus { + /// Vendor + pub vendor: String, + /// Product + pub product: String, + /// Revision + pub revision: String, + /// Block size (0 is variable size) + pub blocksize: u32, + /// Compression enabled + pub compression: bool, + /// Drive buffer mode + pub buffer_mode: u8, + /// Tape density + pub density: TapeDensity, + /// Media is write protected + #[serde(skip_serializing_if="Option::is_none")] + pub write_protect: Option, + /// Tape Alert Flags + #[serde(skip_serializing_if="Option::is_none")] + pub alert_flags: Option, + /// Current file number + #[serde(skip_serializing_if="Option::is_none")] + pub file_number: Option, + /// Current block number + #[serde(skip_serializing_if="Option::is_none")] + pub block_number: Option, + /// Medium Manufacture Date (epoch) + #[serde(skip_serializing_if="Option::is_none")] + pub manufactured: Option, + /// Total Bytes Read in Medium Life + #[serde(skip_serializing_if="Option::is_none")] + pub bytes_read: Option, + /// Total Bytes Written in Medium Life + #[serde(skip_serializing_if="Option::is_none")] + pub bytes_written: Option, + /// Number of mounts for the current volume (i.e., Thread Count) + #[serde(skip_serializing_if="Option::is_none")] + pub volume_mounts: Option, + /// Count of the total number of times the medium has passed over + /// the head. + #[serde(skip_serializing_if="Option::is_none")] + pub medium_passes: Option, + /// Estimated tape wearout factor (assuming max. 
16000 end-to-end passes) + #[serde(skip_serializing_if="Option::is_none")] + pub medium_wearout: Option, +} + +#[api()] +/// Volume statistics from SCSI log page 17h +#[derive(Default, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub struct Lp17VolumeStatistics { + /// Volume mounts (thread count) + pub volume_mounts: u64, + /// Total data sets written + pub volume_datasets_written: u64, + /// Write retries + pub volume_recovered_write_data_errors: u64, + /// Total unrecovered write errors + pub volume_unrecovered_write_data_errors: u64, + /// Total suspended writes + pub volume_write_servo_errors: u64, + /// Total fatal suspended writes + pub volume_unrecovered_write_servo_errors: u64, + /// Total datasets read + pub volume_datasets_read: u64, + /// Total read retries + pub volume_recovered_read_errors: u64, + /// Total unrecovered read errors + pub volume_unrecovered_read_errors: u64, + /// Last mount unrecovered write errors + pub last_mount_unrecovered_write_errors: u64, + /// Last mount unrecovered read errors + pub last_mount_unrecovered_read_errors: u64, + /// Last mount bytes written + pub last_mount_bytes_written: u64, + /// Last mount bytes read + pub last_mount_bytes_read: u64, + /// Lifetime bytes written + pub lifetime_bytes_written: u64, + /// Lifetime bytes read + pub lifetime_bytes_read: u64, + /// Last load write compression ratio + pub last_load_write_compression_ratio: u64, + /// Last load read compression ratio + pub last_load_read_compression_ratio: u64, + /// Medium mount time + pub medium_mount_time: u64, + /// Medium ready time + pub medium_ready_time: u64, + /// Total native capacity + pub total_native_capacity: u64, + /// Total used native capacity + pub total_used_native_capacity: u64, + /// Write protect + pub write_protect: bool, + /// Volume is WORM + pub worm: bool, + /// Beginning of medium passes + pub beginning_of_medium_passes: u64, + /// Middle of medium passes + pub middle_of_tape_passes: u64, + /// Volume 
serial number + pub serial: String, +} diff --git a/pbs-api-types/src/tape/media.rs b/pbs-api-types/src/tape/media.rs new file mode 100644 index 00000000..4e301c98 --- /dev/null +++ b/pbs-api-types/src/tape/media.rs @@ -0,0 +1,182 @@ +use ::serde::{Deserialize, Serialize}; + +use proxmox::{ + api::{api, schema::*}, + tools::Uuid, +}; + +use crate::{ + UUID_FORMAT, + MediaStatus, + MediaLocation, +}; + +pub const MEDIA_SET_UUID_SCHEMA: Schema = + StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).") + .format(&UUID_FORMAT) + .schema(); + +pub const MEDIA_UUID_SCHEMA: Schema = + StringSchema::new("Media Uuid.") + .format(&UUID_FORMAT) + .schema(); + +#[api( + properties: { + "media-set-uuid": { + schema: MEDIA_SET_UUID_SCHEMA, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Media Set list entry +pub struct MediaSetListEntry { + /// Media set name + pub media_set_name: String, + pub media_set_uuid: Uuid, + /// MediaSet creation time stamp + pub media_set_ctime: i64, + /// Media Pool + pub pool: String, +} + +#[api( + properties: { + location: { + type: MediaLocation, + }, + status: { + type: MediaStatus, + }, + uuid: { + schema: MEDIA_UUID_SCHEMA, + }, + "media-set-uuid": { + schema: MEDIA_SET_UUID_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Media list entry +pub struct MediaListEntry { + /// Media label text (or Barcode) + pub label_text: String, + pub uuid: Uuid, + /// Creation time stamp + pub ctime: i64, + pub location: MediaLocation, + pub status: MediaStatus, + /// Expired flag + pub expired: bool, + /// Catalog status OK + pub catalog: bool, + /// Media set name + #[serde(skip_serializing_if="Option::is_none")] + pub media_set_name: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub media_set_uuid: Option, + /// Media set seq_nr + #[serde(skip_serializing_if="Option::is_none")] + pub 
seq_nr: Option, + /// MediaSet creation time stamp + #[serde(skip_serializing_if="Option::is_none")] + pub media_set_ctime: Option, + /// Media Pool + #[serde(skip_serializing_if="Option::is_none")] + pub pool: Option, +} + +#[api( + properties: { + uuid: { + schema: MEDIA_UUID_SCHEMA, + }, + "media-set-uuid": { + schema: MEDIA_SET_UUID_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Media label info +pub struct MediaIdFlat { + /// Unique ID + pub uuid: Uuid, + /// Media label text (or Barcode) + pub label_text: String, + /// Creation time stamp + pub ctime: i64, + // All MediaSet properties are optional here + /// MediaSet Pool + #[serde(skip_serializing_if="Option::is_none")] + pub pool: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub media_set_uuid: Option, + /// MediaSet media sequence number + #[serde(skip_serializing_if="Option::is_none")] + pub seq_nr: Option, + /// MediaSet Creation time stamp + #[serde(skip_serializing_if="Option::is_none")] + pub media_set_ctime: Option, + /// Encryption key fingerprint + #[serde(skip_serializing_if="Option::is_none")] + pub encryption_key_fingerprint: Option, +} + +#[api( + properties: { + uuid: { + schema: MEDIA_UUID_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Label with optional Uuid +pub struct LabelUuidMap { + /// Changer label text (or Barcode) + pub label_text: String, + /// Associated Uuid (if any) + pub uuid: Option, +} + +#[api( + properties: { + uuid: { + schema: MEDIA_UUID_SCHEMA, + }, + "media-set-uuid": { + schema: MEDIA_SET_UUID_SCHEMA, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Media content list entry +pub struct MediaContentEntry { + /// Media label text (or Barcode) + pub label_text: String, + /// Media Uuid + pub uuid: Uuid, + /// Media set name + pub media_set_name: String, + /// Media set uuid + pub 
media_set_uuid: Uuid, + /// MediaSet Creation time stamp + pub media_set_ctime: i64, + /// Media set seq_nr + pub seq_nr: u64, + /// Media Pool + pub pool: String, + /// Datastore Name + pub store: String, + /// Backup snapshot + pub snapshot: String, + /// Snapshot creation time (epoch) + pub backup_time: i64, +} diff --git a/pbs-api-types/src/tape/media_location.rs b/pbs-api-types/src/tape/media_location.rs new file mode 100644 index 00000000..a917c609 --- /dev/null +++ b/pbs-api-types/src/tape/media_location.rs @@ -0,0 +1,91 @@ +use anyhow::{bail, Error}; + +use proxmox::api::{ + schema::{ + Schema, + StringSchema, + ApiStringFormat, + parse_simple_value, + }, +}; + +use crate::{ + PROXMOX_SAFE_ID_FORMAT, + CHANGER_NAME_SCHEMA, +}; + +pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + +#[derive(Debug, PartialEq, Clone)] +/// Media location +pub enum MediaLocation { + /// Ready for use (inside tape library) + Online(String), + /// Local available, but need to be mounted (insert into tape + /// drive) + Offline, + /// Media is inside a Vault + Vault(String), +} + +proxmox::forward_deserialize_to_from_str!(MediaLocation); +proxmox::forward_serialize_to_display!(MediaLocation); + +impl proxmox::api::schema::ApiType for MediaLocation { + const API_SCHEMA: Schema = StringSchema::new( + "Media location (e.g. 
'offline', 'online-', 'vault-')") + .format(&ApiStringFormat::VerifyFn(|text| { + let location: MediaLocation = text.parse()?; + match location { + MediaLocation::Online(ref changer) => { + parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?; + } + MediaLocation::Vault(ref vault) => { + parse_simple_value(vault, &VAULT_NAME_SCHEMA)?; + } + MediaLocation::Offline => { /* OK */} + } + Ok(()) + })) + .schema(); +} + + +impl std::fmt::Display for MediaLocation { + + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MediaLocation::Offline => { + write!(f, "offline") + } + MediaLocation::Online(changer) => { + write!(f, "online-{}", changer) + } + MediaLocation::Vault(vault) => { + write!(f, "vault-{}", vault) + } + } + } +} + +impl std::str::FromStr for MediaLocation { + type Err = Error; + + fn from_str(s: &str) -> Result { + if s == "offline" { + return Ok(MediaLocation::Offline); + } + if let Some(changer) = s.strip_prefix("online-") { + return Ok(MediaLocation::Online(changer.to_string())); + } + if let Some(vault) = s.strip_prefix("vault-") { + return Ok(MediaLocation::Vault(vault.to_string())); + } + + bail!("MediaLocation parse error"); + } +} diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs new file mode 100644 index 00000000..53e46788 --- /dev/null +++ b/pbs-api-types/src/tape/media_pool.rs @@ -0,0 +1,161 @@ +//! Types for tape media pool API +//! +//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums, +//! so we cannot use them directly for the API. Instead, we represent +//! them as String. 
+ +use std::str::FromStr; + +use anyhow::Error; +use serde::{Deserialize, Serialize}; + +use proxmox::api::{ + api, + schema::{Schema, StringSchema, ApiStringFormat, Updater}, +}; + +use pbs_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan}; + +use crate::{ + PROXMOX_SAFE_ID_FORMAT, + SINGLE_LINE_COMMENT_FORMAT, + SINGLE_LINE_COMMENT_SCHEMA, + TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA, +}; + +pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(2) + .max_length(32) + .schema(); + +pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new( + "Media set naming template (may contain strftime() time format specifications).") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .min_length(2) + .max_length(64) + .schema(); + +pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = + ApiStringFormat::VerifyFn(|s| { MediaSetPolicy::from_str(s)?; Ok(()) }); + +pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema = StringSchema::new( + "Media set allocation policy ('continue', 'always', or a calendar event).") + .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT) + .schema(); + +/// Media set allocation policy +pub enum MediaSetPolicy { + /// Try to use the current media set + ContinueCurrent, + /// Each backup job creates a new media set + AlwaysCreate, + /// Create a new set when the specified CalendarEvent triggers + CreateAt(CalendarEvent), +} + +impl std::str::FromStr for MediaSetPolicy { + type Err = Error; + + fn from_str(s: &str) -> Result { + if s == "continue" { + return Ok(MediaSetPolicy::ContinueCurrent); + } + if s == "always" { + return Ok(MediaSetPolicy::AlwaysCreate); + } + + let event = parse_calendar_event(s)?; + + Ok(MediaSetPolicy::CreateAt(event)) + } +} + +pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = + ApiStringFormat::VerifyFn(|s| { RetentionPolicy::from_str(s)?; Ok(()) }); + +pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema = 
StringSchema::new( + "Media retention policy ('overwrite', 'keep', or time span).") + .format(&MEDIA_RETENTION_POLICY_FORMAT) + .schema(); + +/// Media retention Policy +pub enum RetentionPolicy { + /// Always overwrite media + OverwriteAlways, + /// Protect data for the timespan specified + ProtectFor(TimeSpan), + /// Never overwrite data + KeepForever, +} + +impl std::str::FromStr for RetentionPolicy { + type Err = Error; + + fn from_str(s: &str) -> Result { + if s == "overwrite" { + return Ok(RetentionPolicy::OverwriteAlways); + } + if s == "keep" { + return Ok(RetentionPolicy::KeepForever); + } + + let time_span = parse_time_span(s)?; + + Ok(RetentionPolicy::ProtectFor(time_span)) + } +} + +#[api( + properties: { + name: { + schema: MEDIA_POOL_NAME_SCHEMA, + }, + allocation: { + schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA, + optional: true, + }, + retention: { + schema: MEDIA_RETENTION_POLICY_SCHEMA, + optional: true, + }, + template: { + schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA, + optional: true, + }, + encrypt: { + schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA, + optional: true, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + }, +)] +#[derive(Serialize,Deserialize,Updater)] +/// Media pool configuration +pub struct MediaPoolConfig { + /// The pool name + #[updater(skip)] + pub name: String, + /// Media Set allocation policy + #[serde(skip_serializing_if="Option::is_none")] + pub allocation: Option, + /// Media retention policy + #[serde(skip_serializing_if="Option::is_none")] + pub retention: Option, + /// Media set naming template (default "%c") + /// + /// The template is UTF8 text, and can include strftime time + /// format specifications. + #[serde(skip_serializing_if="Option::is_none")] + pub template: Option, + /// Encryption key fingerprint + /// + /// If set, encrypt all data using the specified key. 
+ #[serde(skip_serializing_if="Option::is_none")] + pub encrypt: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, +} diff --git a/pbs-api-types/src/tape/media_status.rs b/pbs-api-types/src/tape/media_status.rs new file mode 100644 index 00000000..5a3bff96 --- /dev/null +++ b/pbs-api-types/src/tape/media_status.rs @@ -0,0 +1,21 @@ +use ::serde::{Deserialize, Serialize}; + +use proxmox::api::api; + +#[api()] +/// Media status +#[derive(Debug, PartialEq, Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Media Status +pub enum MediaStatus { + /// Media is ready to be written + Writable, + /// Media is full (contains data) + Full, + /// Media is marked as unknown, needs rescan + Unknown, + /// Media is marked as damaged + Damaged, + /// Media is marked as retired + Retired, +} diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs new file mode 100644 index 00000000..a77be7f7 --- /dev/null +++ b/pbs-api-types/src/tape/mod.rs @@ -0,0 +1,94 @@ +//! 
Types for tape backup API + +mod device; +pub use device::*; + +mod changer; +pub use changer::*; + +mod drive; +pub use drive::*; + +mod media_pool; +pub use media_pool::*; + +mod media_status; +pub use media_status::*; + +mod media_location; + +pub use media_location::*; + +mod media; +pub use media::*; + +use ::serde::{Deserialize, Serialize}; + +use proxmox::api::api; +use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat}; +use proxmox::tools::Uuid; + +use proxmox::const_regex; + +use crate::{ + FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, +}; + +const_regex!{ + pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$"); +} + +pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX); + +pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new( + "Tape encryption key fingerprint (sha256)." +) + .format(&FINGERPRINT_SHA256_FORMAT) + .schema(); + +pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new( + "A snapshot in the format: 'store:type/id/time") + .format(&TAPE_RESTORE_SNAPSHOT_FORMAT) + .type_text("store:type/id/time") + .schema(); + +#[api( + properties: { + pool: { + schema: MEDIA_POOL_NAME_SCHEMA, + optional: true, + }, + "label-text": { + schema: MEDIA_LABEL_SCHEMA, + optional: true, + }, + "media": { + schema: MEDIA_UUID_SCHEMA, + optional: true, + }, + "media-set": { + schema: MEDIA_SET_UUID_SCHEMA, + optional: true, + }, + "backup-type": { + schema: BACKUP_TYPE_SCHEMA, + optional: true, + }, + "backup-id": { + schema: BACKUP_ID_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all="kebab-case")] +/// Content list filter parameters +pub struct MediaContentListFilter { + pub pool: Option, + pub label_text: Option, + pub media: Option, + pub media_set: Option, + pub backup_type: Option, + pub backup_id: Option, +} From 
8fe018cfd89de75dff6c7672de2f8e5440343511 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 6 Sep 2021 10:19:29 +0200 Subject: [PATCH 028/299] move Kdf and KeyInfo to pbs_api_types workspace --- pbs-api-types/src/key_derivation.rs | 56 +++++++++++++++++++++++++++++ pbs-api-types/src/lib.rs | 3 ++ 2 files changed, 59 insertions(+) create mode 100644 pbs-api-types/src/key_derivation.rs diff --git a/pbs-api-types/src/key_derivation.rs b/pbs-api-types/src/key_derivation.rs new file mode 100644 index 00000000..9a53130c --- /dev/null +++ b/pbs-api-types/src/key_derivation.rs @@ -0,0 +1,56 @@ +use serde::{Deserialize, Serialize}; + +use proxmox::api::api; + +use crate::CERT_FINGERPRINT_SHA256_SCHEMA; + +#[api(default: "scrypt")] +#[derive(Clone, Copy, Debug, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +/// Key derivation function for password protected encryption keys. +pub enum Kdf { + /// Do not encrypt the key. + None, + /// Encrypt they key with a password using SCrypt. + Scrypt, + /// Encrtypt the Key with a password using PBKDF2 + PBKDF2, +} + +impl Default for Kdf { + #[inline] + fn default() -> Self { + Kdf::Scrypt + } +} + +#[api( + properties: { + kdf: { + type: Kdf, + }, + fingerprint: { + schema: CERT_FINGERPRINT_SHA256_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Deserialize, Serialize)] +/// Encryption Key Information +pub struct KeyInfo { + /// Path to key (if stored in a file) + #[serde(skip_serializing_if="Option::is_none")] + pub path: Option, + pub kdf: Kdf, + /// Key creation time + pub created: i64, + /// Key modification time + pub modified: i64, + /// Key fingerprint + #[serde(skip_serializing_if="Option::is_none")] + pub fingerprint: Option, + /// Password hint + #[serde(skip_serializing_if="Option::is_none")] + pub hint: Option, +} + diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 427b2d9f..7b978e82 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -34,6 +34,9 @@ macro_rules! 
SNAPSHOT_PATH_REGEX_STR { ); } +mod key_derivation; +pub use key_derivation::{Kdf, KeyInfo}; + #[macro_use] mod userid; pub use userid::Authid; From 7240e6374bde3058f0f41861e54d9338da982914 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 7 Sep 2021 12:32:01 +0200 Subject: [PATCH 029/299] moved tape_job.rs to pbs_config workspace --- pbs-api-types/src/jobs.rs | 393 ++++++++++++++++++++++++++++++++++++++ pbs-api-types/src/lib.rs | 33 ++++ 2 files changed, 426 insertions(+) create mode 100644 pbs-api-types/src/jobs.rs diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs new file mode 100644 index 00000000..bbf684c7 --- /dev/null +++ b/pbs-api-types/src/jobs.rs @@ -0,0 +1,393 @@ +use serde::{Deserialize, Serialize}; + +use proxmox::const_regex; + +use proxmox::api::{api, schema::*}; + +use crate::{ + Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, + SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA, +}; + +const_regex!{ + + /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' + pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); + /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID' + pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); +} + +pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task ID.") + .max_length(256) + .schema(); + +pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + +pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new( + "Run sync job at specified schedule.") + .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .type_text("") + .schema(); + +pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new( + "Run garbage collection job at specified schedule.") + 
.format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .type_text("") + .schema(); + +pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new( + "Run prune job at specified schedule.") + .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .type_text("") + .schema(); + +pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new( + "Run verify job at specified schedule.") + .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .type_text("") + .schema(); + +pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( + "Delete vanished backups. This remove the local copy if the remote backup was deleted.") + .default(true) + .schema(); + +#[api( + properties: { + "next-run": { + description: "Estimated time of the next run (UNIX epoch).", + optional: true, + type: Integer, + }, + "last-run-state": { + description: "Result of the last run.", + optional: true, + type: String, + }, + "last-run-upid": { + description: "Task UPID of the last run.", + optional: true, + type: String, + }, + "last-run-endtime": { + description: "Endtime of the last run.", + optional: true, + type: Integer, + }, + } +)] +#[derive(Serialize,Deserialize,Default)] +#[serde(rename_all="kebab-case")] +/// Job Scheduling Status +pub struct JobScheduleStatus { + #[serde(skip_serializing_if="Option::is_none")] + pub next_run: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub last_run_state: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub last_run_upid: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub last_run_endtime: Option, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// When do we send notifications +pub enum Notify { + /// Never send notification + Never, + /// Send notifications for failed and successful jobs + Always, + /// Send notifications for failed jobs only + Error, +} + 
+#[api( + properties: { + gc: { + type: Notify, + optional: true, + }, + verify: { + type: Notify, + optional: true, + }, + sync: { + type: Notify, + optional: true, + }, + }, +)] +#[derive(Debug, Serialize, Deserialize)] +/// Datastore notify settings +pub struct DatastoreNotify { + /// Garbage collection settings + pub gc: Option, + /// Verify job setting + pub verify: Option, + /// Sync job setting + pub sync: Option, +} + +pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new( + "Datastore notification setting") + .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA)) + .schema(); + +pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( + "Do not verify backups that are already verified if their verification is not outdated.") + .default(true) + .schema(); + +pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new( + "Days after that a verification becomes outdated") + .minimum(1) + .schema(); + +#[api( + properties: { + id: { + schema: JOB_ID_SCHEMA, + }, + store: { + schema: DATASTORE_SCHEMA, + }, + "ignore-verified": { + optional: true, + schema: IGNORE_VERIFIED_BACKUPS_SCHEMA, + }, + "outdated-after": { + optional: true, + schema: VERIFICATION_OUTDATED_AFTER_SCHEMA, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + schedule: { + optional: true, + schema: VERIFICATION_SCHEDULE_SCHEMA, + }, + } +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all="kebab-case")] +/// Verification Job +pub struct VerificationJobConfig { + /// unique ID to address this job + pub id: String, + /// the datastore ID this verificaiton job affects + pub store: String, + #[serde(skip_serializing_if="Option::is_none")] + /// if not set to false, check the age of the last snapshot verification to filter + /// out recent ones, depending on 'outdated_after' configuration. 
+ pub ignore_verified: Option, + #[serde(skip_serializing_if="Option::is_none")] + /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false. + pub outdated_after: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + #[serde(skip_serializing_if="Option::is_none")] + /// when to schedule this job in calendar event notation + pub schedule: Option, +} + +#[api( + properties: { + config: { + type: VerificationJobConfig, + }, + status: { + type: JobScheduleStatus, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all="kebab-case")] +/// Status of Verification Job +pub struct VerificationJobStatus { + #[serde(flatten)] + pub config: VerificationJobConfig, + #[serde(flatten)] + pub status: JobScheduleStatus, +} + +#[api( + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + pool: { + schema: MEDIA_POOL_NAME_SCHEMA, + }, + drive: { + schema: DRIVE_NAME_SCHEMA, + }, + "eject-media": { + description: "Eject media upon job completion.", + type: bool, + optional: true, + }, + "export-media-set": { + description: "Export media set upon job completion.", + type: bool, + optional: true, + }, + "latest-only": { + description: "Backup latest snapshots only.", + type: bool, + optional: true, + }, + "notify-user": { + optional: true, + type: Userid, + }, + } +)] +#[derive(Serialize,Deserialize,Clone)] +#[serde(rename_all="kebab-case")] +/// Tape Backup Job Setup +pub struct TapeBackupJobSetup { + pub store: String, + pub pool: String, + pub drive: String, + #[serde(skip_serializing_if="Option::is_none")] + pub eject_media: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub export_media_set: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub latest_only: Option, + /// Send job email notification to this user + #[serde(skip_serializing_if="Option::is_none")] + pub notify_user: Option, +} + +#[api( + properties: { + id: { + schema: JOB_ID_SCHEMA, + }, + setup: { + type: 
TapeBackupJobSetup, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + schedule: { + optional: true, + schema: SYNC_SCHEDULE_SCHEMA, + }, + } +)] +#[derive(Serialize,Deserialize,Clone)] +#[serde(rename_all="kebab-case")] +/// Tape Backup Job +pub struct TapeBackupJobConfig { + pub id: String, + #[serde(flatten)] + pub setup: TapeBackupJobSetup, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub schedule: Option, +} + +#[api( + properties: { + config: { + type: TapeBackupJobConfig, + }, + status: { + type: JobScheduleStatus, + }, + }, +)] +#[derive(Serialize,Deserialize)] +#[serde(rename_all="kebab-case")] +/// Status of Tape Backup Job +pub struct TapeBackupJobStatus { + #[serde(flatten)] + pub config: TapeBackupJobConfig, + #[serde(flatten)] + pub status: JobScheduleStatus, + /// Next tape used (best guess) + #[serde(skip_serializing_if="Option::is_none")] + pub next_media_label: Option, +} + +#[api( + properties: { + id: { + schema: JOB_ID_SCHEMA, + }, + store: { + schema: DATASTORE_SCHEMA, + }, + "owner": { + type: Authid, + optional: true, + }, + remote: { + schema: REMOTE_ID_SCHEMA, + }, + "remote-store": { + schema: DATASTORE_SCHEMA, + }, + "remove-vanished": { + schema: REMOVE_VANISHED_BACKUPS_SCHEMA, + optional: true, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + schedule: { + optional: true, + schema: SYNC_SCHEDULE_SCHEMA, + }, + } +)] +#[derive(Serialize,Deserialize,Clone)] +#[serde(rename_all="kebab-case")] +/// Sync Job +pub struct SyncJobConfig { + pub id: String, + pub store: String, + #[serde(skip_serializing_if="Option::is_none")] + pub owner: Option, + pub remote: String, + pub remote_store: String, + #[serde(skip_serializing_if="Option::is_none")] + pub remove_vanished: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub 
schedule: Option, +} + +#[api( + properties: { + config: { + type: SyncJobConfig, + }, + status: { + type: JobScheduleStatus, + }, + }, +)] + +#[derive(Serialize,Deserialize)] +#[serde(rename_all="kebab-case")] +/// Status of Sync Job +pub struct SyncJobStatus { + #[serde(flatten)] + pub config: SyncJobConfig, + #[serde(flatten)] + pub status: JobScheduleStatus, +} diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 7b978e82..055907cb 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -34,6 +34,9 @@ macro_rules! SNAPSHOT_PATH_REGEX_STR { ); } +mod jobs; +pub use jobs::*; + mod key_derivation; pub use key_derivation::{Kdf, KeyInfo}; @@ -667,3 +670,33 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { &TaskListItem::API_SCHEMA, ).schema(), }; + +#[api()] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +/// Describes a package for which an update is available. +pub struct APTUpdateInfo { + /// Package name + pub package: String, + /// Package title + pub title: String, + /// Package architecture + pub arch: String, + /// Human readable package description + pub description: String, + /// New version to be updated to + pub version: String, + /// Old version currently installed + pub old_version: String, + /// Package origin + pub origin: String, + /// Package priority in human-readable form + pub priority: String, + /// Package section + pub section: String, + /// URL under which the package's changelog can be retrieved + pub change_log_url: String, + /// Custom extra field for additional package information + #[serde(skip_serializing_if="Option::is_none")] + pub extra_info: Option, +} From 7760d5679c9069e2d3509c05bcf9034525579e27 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 8 Sep 2021 08:28:09 +0200 Subject: [PATCH 030/299] sync job cleanup: use Updater/flatten --- pbs-api-types/src/jobs.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index bbf684c7..d345ad96 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -353,10 +353,11 @@ pub struct TapeBackupJobStatus { }, } )] -#[derive(Serialize,Deserialize,Clone)] +#[derive(Serialize,Deserialize,Clone,Updater)] #[serde(rename_all="kebab-case")] /// Sync Job pub struct SyncJobConfig { + #[updater(skip)] pub id: String, pub store: String, #[serde(skip_serializing_if="Option::is_none")] From 5f13dcc5fcc2da79a7aade7eb16a904746e6e40f Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 8 Sep 2021 08:40:32 +0200 Subject: [PATCH 031/299] verify job cleanup: use Updater/flatten --- pbs-api-types/src/jobs.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index d345ad96..3396e39e 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -175,11 +175,12 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new( }, } )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize,Deserialize,Updater)] #[serde(rename_all="kebab-case")] /// Verification Job pub struct VerificationJobConfig { /// unique ID to address this job + #[updater(skip)] pub id: String, /// the datastore ID this verificaiton job affects pub store: String, From 3d9b2c8fd5fd6cecb8ced07f91981e1f8da98509 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 8 Sep 2021 08:55:25 +0200 Subject: [PATCH 032/299] tape job cleanup: user Updater --- pbs-api-types/src/jobs.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 3396e39e..a96fcc10 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -250,7 +250,7 @@ pub struct VerificationJobStatus { }, } )] -#[derive(Serialize,Deserialize,Clone)] +#[derive(Serialize,Deserialize,Clone,Updater)] #[serde(rename_all="kebab-case")] /// Tape Backup Job Setup pub struct 
TapeBackupJobSetup { @@ -286,10 +286,11 @@ pub struct TapeBackupJobSetup { }, } )] -#[derive(Serialize,Deserialize,Clone)] +#[derive(Serialize,Deserialize,Clone,Updater)] #[serde(rename_all="kebab-case")] /// Tape Backup Job pub struct TapeBackupJobConfig { + #[updater(skip)] pub id: String, #[serde(flatten)] pub setup: TapeBackupJobSetup, From 56d4dc1034ebe3829db2f392ebf697221aac624e Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 8 Sep 2021 09:19:35 +0200 Subject: [PATCH 033/299] changer config cleanup: use Updater --- pbs-api-types/src/tape/changer.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/tape/changer.rs b/pbs-api-types/src/tape/changer.rs index 24f529df..e36eb32c 100644 --- a/pbs-api-types/src/tape/changer.rs +++ b/pbs-api-types/src/tape/changer.rs @@ -10,6 +10,7 @@ use proxmox::api::{ ArraySchema, IntegerSchema, StringSchema, + Updater, }, }; @@ -62,10 +63,11 @@ Import/Export, i.e. any media in those slots are considered to be }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize,Deserialize,Updater)] #[serde(rename_all = "kebab-case")] /// SCSI tape changer pub struct ScsiTapeChanger { + #[updater(skip)] pub name: String, pub path: String, #[serde(skip_serializing_if="Option::is_none")] From db1012b5aa379e0b23e10fe5752b67e9937996d1 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 8 Sep 2021 12:22:48 +0200 Subject: [PATCH 034/299] move network config to pbs_config workspace --- pbs-api-types/src/lib.rs | 3 + pbs-api-types/src/network.rs | 308 +++++++++++++++++++++++++++++++++++ 2 files changed, 311 insertions(+) create mode 100644 pbs-api-types/src/network.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 055907cb..62ded91d 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -40,6 +40,9 @@ pub use jobs::*; mod key_derivation; pub use key_derivation::{Kdf, KeyInfo}; +mod network; +pub use network::*; + #[macro_use] mod userid; pub use 
userid::Authid; diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs new file mode 100644 index 00000000..d3a8e43c --- /dev/null +++ b/pbs-api-types/src/network.rs @@ -0,0 +1,308 @@ +use serde::{Deserialize, Serialize}; + +use proxmox::api::{api, schema::*}; + +use crate::{ + PROXMOX_SAFE_ID_REGEX, + IP_V4_FORMAT, IP_V6_FORMAT, IP_FORMAT, + CIDR_V4_FORMAT, CIDR_V6_FORMAT, CIDR_FORMAT, +}; + +pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); + +pub const IP_V4_SCHEMA: Schema = + StringSchema::new("IPv4 address.") + .format(&IP_V4_FORMAT) + .max_length(15) + .schema(); + +pub const IP_V6_SCHEMA: Schema = + StringSchema::new("IPv6 address.") + .format(&IP_V6_FORMAT) + .max_length(39) + .schema(); + +pub const IP_SCHEMA: Schema = + StringSchema::new("IP (IPv4 or IPv6) address.") + .format(&IP_FORMAT) + .max_length(39) + .schema(); + +pub const CIDR_V4_SCHEMA: Schema = + StringSchema::new("IPv4 address with netmask (CIDR notation).") + .format(&CIDR_V4_FORMAT) + .max_length(18) + .schema(); + +pub const CIDR_V6_SCHEMA: Schema = + StringSchema::new("IPv6 address with netmask (CIDR notation).") + .format(&CIDR_V6_FORMAT) + .max_length(43) + .schema(); + +pub const CIDR_SCHEMA: Schema = + StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).") + .format(&CIDR_FORMAT) + .max_length(43) + .schema(); + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Interface configuration method +pub enum NetworkConfigMethod { + /// Configuration is done manually using other tools + Manual, + /// Define interfaces with statically allocated addresses. + Static, + /// Obtain an address via DHCP + DHCP, + /// Define the loopback interface. 
+ Loopback, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +#[allow(non_camel_case_types)] +#[repr(u8)] +/// Linux Bond Mode +pub enum LinuxBondMode { + /// Round-robin policy + balance_rr = 0, + /// Active-backup policy + active_backup = 1, + /// XOR policy + balance_xor = 2, + /// Broadcast policy + broadcast = 3, + /// IEEE 802.3ad Dynamic link aggregation + #[serde(rename = "802.3ad")] + ieee802_3ad = 4, + /// Adaptive transmit load balancing + balance_tlb = 5, + /// Adaptive load balancing + balance_alb = 6, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +#[allow(non_camel_case_types)] +#[repr(u8)] +/// Bond Transmit Hash Policy for LACP (802.3ad) +pub enum BondXmitHashPolicy { + /// Layer 2 + layer2 = 0, + /// Layer 2+3 + #[serde(rename = "layer2+3")] + layer2_3 = 1, + /// Layer 3+4 + #[serde(rename = "layer3+4")] + layer3_4 = 2, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Network interface type +pub enum NetworkInterfaceType { + /// Loopback + Loopback, + /// Physical Ethernet device + Eth, + /// Linux Bridge + Bridge, + /// Linux Bond + Bond, + /// Linux VLAN (eth.10) + Vlan, + /// Interface Alias (eth:1) + Alias, + /// Unknown interface type + Unknown, +} + +pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.") + .format(&NETWORK_INTERFACE_FORMAT) + .min_length(1) + .max_length(libc::IFNAMSIZ-1) + .schema(); + +pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new( + "Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA) + .schema(); + +pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new( + "A list of network devices, comma separated.") + .format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA)) + .schema(); + +#[api( + properties: { + name: { + 
schema: NETWORK_INTERFACE_NAME_SCHEMA, + }, + "type": { + type: NetworkInterfaceType, + }, + method: { + type: NetworkConfigMethod, + optional: true, + }, + method6: { + type: NetworkConfigMethod, + optional: true, + }, + cidr: { + schema: CIDR_V4_SCHEMA, + optional: true, + }, + cidr6: { + schema: CIDR_V6_SCHEMA, + optional: true, + }, + gateway: { + schema: IP_V4_SCHEMA, + optional: true, + }, + gateway6: { + schema: IP_V6_SCHEMA, + optional: true, + }, + options: { + description: "Option list (inet)", + type: Array, + items: { + description: "Optional attribute line.", + type: String, + }, + }, + options6: { + description: "Option list (inet6)", + type: Array, + items: { + description: "Optional attribute line.", + type: String, + }, + }, + comments: { + description: "Comments (inet, may span multiple lines)", + type: String, + optional: true, + }, + comments6: { + description: "Comments (inet6, may span multiple lines)", + type: String, + optional: true, + }, + bridge_ports: { + schema: NETWORK_INTERFACE_ARRAY_SCHEMA, + optional: true, + }, + slaves: { + schema: NETWORK_INTERFACE_ARRAY_SCHEMA, + optional: true, + }, + bond_mode: { + type: LinuxBondMode, + optional: true, + }, + "bond-primary": { + schema: NETWORK_INTERFACE_NAME_SCHEMA, + optional: true, + }, + bond_xmit_hash_policy: { + type: BondXmitHashPolicy, + optional: true, + }, + } +)] +#[derive(Debug, Serialize, Deserialize)] +/// Network Interface configuration +pub struct Interface { + /// Autostart interface + #[serde(rename = "autostart")] + pub autostart: bool, + /// Interface is active (UP) + pub active: bool, + /// Interface name + pub name: String, + /// Interface type + #[serde(rename = "type")] + pub interface_type: NetworkInterfaceType, + #[serde(skip_serializing_if="Option::is_none")] + pub method: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub method6: Option, + #[serde(skip_serializing_if="Option::is_none")] + /// IPv4 address with netmask + pub cidr: Option, + 
#[serde(skip_serializing_if="Option::is_none")] + /// IPv4 gateway + pub gateway: Option, + #[serde(skip_serializing_if="Option::is_none")] + /// IPv6 address with netmask + pub cidr6: Option, + #[serde(skip_serializing_if="Option::is_none")] + /// IPv6 gateway + pub gateway6: Option, + + #[serde(skip_serializing_if="Vec::is_empty")] + pub options: Vec, + #[serde(skip_serializing_if="Vec::is_empty")] + pub options6: Vec, + + #[serde(skip_serializing_if="Option::is_none")] + pub comments: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub comments6: Option, + + #[serde(skip_serializing_if="Option::is_none")] + /// Maximum Transmission Unit + pub mtu: Option, + + #[serde(skip_serializing_if="Option::is_none")] + pub bridge_ports: Option>, + /// Enable bridge vlan support. + #[serde(skip_serializing_if="Option::is_none")] + pub bridge_vlan_aware: Option, + + #[serde(skip_serializing_if="Option::is_none")] + pub slaves: Option>, + #[serde(skip_serializing_if="Option::is_none")] + pub bond_mode: Option, + #[serde(skip_serializing_if="Option::is_none")] + #[serde(rename = "bond-primary")] + pub bond_primary: Option, + pub bond_xmit_hash_policy: Option, +} + +impl Interface { + pub fn new(name: String) -> Self { + Self { + name, + interface_type: NetworkInterfaceType::Unknown, + autostart: false, + active: false, + method: None, + method6: None, + cidr: None, + gateway: None, + cidr6: None, + gateway6: None, + options: Vec::new(), + options6: Vec::new(), + comments: None, + comments6: None, + mtu: None, + bridge_ports: None, + bridge_vlan_aware: None, + slaves: None, + bond_mode: None, + bond_primary: None, + bond_xmit_hash_policy: None, + } + } +} From d48f612bec4687c142bb998ec691647653f97c3d Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 9 Sep 2021 10:32:44 +0200 Subject: [PATCH 035/299] move acl to pbs_config workspaces, pbs_api_types cleanups --- pbs-api-types/src/acl.rs | 284 +++++++++++++++++++++ pbs-api-types/src/datastore.rs | 363 
++++++++++++++++++++++++++ pbs-api-types/src/lib.rs | 448 ++++++++++----------------------- pbs-api-types/src/zfs.rs | 81 ++++++ 4 files changed, 862 insertions(+), 314 deletions(-) create mode 100644 pbs-api-types/src/acl.rs create mode 100644 pbs-api-types/src/datastore.rs create mode 100644 pbs-api-types/src/zfs.rs diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs new file mode 100644 index 00000000..140ff58e --- /dev/null +++ b/pbs-api-types/src/acl.rs @@ -0,0 +1,284 @@ +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; +use serde::de::{value, IntoDeserializer}; + +use proxmox::api::api; +use proxmox::api::schema::{ + ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema, +}; +use proxmox::{constnamedbitmap, const_regex}; + +const_regex! { + pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$"); +} + +// define Privilege bitfield + +constnamedbitmap! { + /// Contains a list of privilege name to privilege value mappings. + /// + /// The names are used when displaying/persisting privileges anywhere, the values are used to + /// allow easy matching of privileges as bitflags. + PRIVILEGES: u64 => { + /// Sys.Audit allows knowing about the system and its status + PRIV_SYS_AUDIT("Sys.Audit"); + /// Sys.Modify allows modifying system-level configuration + PRIV_SYS_MODIFY("Sys.Modify"); + /// Sys.Modify allows to poweroff/reboot/.. 
the system + PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement"); + + /// Datastore.Audit allows knowing about a datastore, + /// including reading the configuration entry and listing its contents + PRIV_DATASTORE_AUDIT("Datastore.Audit"); + /// Datastore.Allocate allows creating or deleting datastores + PRIV_DATASTORE_ALLOCATE("Datastore.Allocate"); + /// Datastore.Modify allows modifying a datastore and its contents + PRIV_DATASTORE_MODIFY("Datastore.Modify"); + /// Datastore.Read allows reading arbitrary backup contents + PRIV_DATASTORE_READ("Datastore.Read"); + /// Allows verifying a datastore + PRIV_DATASTORE_VERIFY("Datastore.Verify"); + + /// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots, + /// but also requires backup ownership + PRIV_DATASTORE_BACKUP("Datastore.Backup"); + /// Datastore.Prune allows deleting snapshots, + /// but also requires backup ownership + PRIV_DATASTORE_PRUNE("Datastore.Prune"); + + /// Permissions.Modify allows modifying ACLs + PRIV_PERMISSIONS_MODIFY("Permissions.Modify"); + + /// Remote.Audit allows reading remote.cfg and sync.cfg entries + PRIV_REMOTE_AUDIT("Remote.Audit"); + /// Remote.Modify allows modifying remote.cfg + PRIV_REMOTE_MODIFY("Remote.Modify"); + /// Remote.Read allows reading data from a configured `Remote` + PRIV_REMOTE_READ("Remote.Read"); + + /// Sys.Console allows access to the system's console + PRIV_SYS_CONSOLE("Sys.Console"); + + /// Tape.Audit allows reading tape backup configuration and status + PRIV_TAPE_AUDIT("Tape.Audit"); + /// Tape.Modify allows modifying tape backup configuration + PRIV_TAPE_MODIFY("Tape.Modify"); + /// Tape.Write allows writing tape media + PRIV_TAPE_WRITE("Tape.Write"); + /// Tape.Read allows reading tape backup configuration and media contents + PRIV_TAPE_READ("Tape.Read"); + + /// Realm.Allocate allows viewing, creating, modifying and deleting realms + PRIV_REALM_ALLOCATE("Realm.Allocate"); + } +} + +/// Admin always has all privileges. 
It can do everything except a few actions +/// which are limited to the 'root@pam` superuser +pub const ROLE_ADMIN: u64 = std::u64::MAX; + +/// NoAccess can be used to remove privileges from specific (sub-)paths +pub const ROLE_NO_ACCESS: u64 = 0; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Audit can view configuration and status information, but not modify it. +pub const ROLE_AUDIT: u64 = 0 + | PRIV_SYS_AUDIT + | PRIV_DATASTORE_AUDIT; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Datastore.Admin can do anything on the datastore. +pub const ROLE_DATASTORE_ADMIN: u64 = 0 + | PRIV_DATASTORE_AUDIT + | PRIV_DATASTORE_MODIFY + | PRIV_DATASTORE_READ + | PRIV_DATASTORE_VERIFY + | PRIV_DATASTORE_BACKUP + | PRIV_DATASTORE_PRUNE; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Datastore.Reader can read/verify datastore content and do restore +pub const ROLE_DATASTORE_READER: u64 = 0 + | PRIV_DATASTORE_AUDIT + | PRIV_DATASTORE_VERIFY + | PRIV_DATASTORE_READ; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Datastore.Backup can do backup and restore, but no prune. +pub const ROLE_DATASTORE_BACKUP: u64 = 0 + | PRIV_DATASTORE_BACKUP; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Datastore.PowerUser can do backup, restore, and prune. +pub const ROLE_DATASTORE_POWERUSER: u64 = 0 + | PRIV_DATASTORE_PRUNE + | PRIV_DATASTORE_BACKUP; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Datastore.Audit can audit the datastore. +pub const ROLE_DATASTORE_AUDIT: u64 = 0 + | PRIV_DATASTORE_AUDIT; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.Audit can audit the remote +pub const ROLE_REMOTE_AUDIT: u64 = 0 + | PRIV_REMOTE_AUDIT; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.Admin can do anything on the remote. 
+pub const ROLE_REMOTE_ADMIN: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_MODIFY + | PRIV_REMOTE_READ; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.SyncOperator can do read and prune on the remote. +pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_READ; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Tape.Audit can audit the tape backup configuration and media content +pub const ROLE_TAPE_AUDIT: u64 = 0 + | PRIV_TAPE_AUDIT; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Tape.Admin can do anything on the tape backup +pub const ROLE_TAPE_ADMIN: u64 = 0 + | PRIV_TAPE_AUDIT + | PRIV_TAPE_MODIFY + | PRIV_TAPE_READ + | PRIV_TAPE_WRITE; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Tape.Operator can do tape backup and restore (but no configuration changes) +pub const ROLE_TAPE_OPERATOR: u64 = 0 + | PRIV_TAPE_AUDIT + | PRIV_TAPE_READ + | PRIV_TAPE_WRITE; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Tape.Reader can do read and inspect tape content +pub const ROLE_TAPE_READER: u64 = 0 + | PRIV_TAPE_AUDIT + | PRIV_TAPE_READ; + +/// NoAccess can be used to remove privileges from specific (sub-)paths +pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess"; + +#[api( + type_text: "", +)] +#[repr(u64)] +#[derive(Serialize, Deserialize)] +/// Enum representing roles via their [PRIVILEGES] combination. +/// +/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a +/// single, unique `u64` value that is used in this enum definition. 
+pub enum Role { + /// Administrator + Admin = ROLE_ADMIN, + /// Auditor + Audit = ROLE_AUDIT, + /// Disable Access + NoAccess = ROLE_NO_ACCESS, + /// Datastore Administrator + DatastoreAdmin = ROLE_DATASTORE_ADMIN, + /// Datastore Reader (inspect datastore content and do restores) + DatastoreReader = ROLE_DATASTORE_READER, + /// Datastore Backup (backup and restore owned backups) + DatastoreBackup = ROLE_DATASTORE_BACKUP, + /// Datastore PowerUser (backup, restore and prune owned backup) + DatastorePowerUser = ROLE_DATASTORE_POWERUSER, + /// Datastore Auditor + DatastoreAudit = ROLE_DATASTORE_AUDIT, + /// Remote Auditor + RemoteAudit = ROLE_REMOTE_AUDIT, + /// Remote Administrator + RemoteAdmin = ROLE_REMOTE_ADMIN, + /// Syncronisation Opertator + RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR, + /// Tape Auditor + TapeAudit = ROLE_TAPE_AUDIT, + /// Tape Administrator + TapeAdmin = ROLE_TAPE_ADMIN, + /// Tape Operator + TapeOperator = ROLE_TAPE_OPERATOR, + /// Tape Reader + TapeReader = ROLE_TAPE_READER, +} + + +impl FromStr for Role { + type Err = value::Error; + + fn from_str(s: &str) -> Result { + Self::deserialize(s.into_deserializer()) + } +} + +pub const ACL_PATH_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&ACL_PATH_REGEX); + +pub const ACL_PATH_SCHEMA: Schema = StringSchema::new( + "Access control path.") + .format(&ACL_PATH_FORMAT) + .min_length(1) + .max_length(128) + .schema(); + +pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new( + "Allow to propagate (inherit) permissions.") + .default(true) + .schema(); + +pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new( + "Type of 'ugid' property.") + .format(&ApiStringFormat::Enum(&[ + EnumEntry::new("user", "User"), + EnumEntry::new("group", "Group")])) + .schema(); + +#[api( + properties: { + propagate: { + schema: ACL_PROPAGATE_SCHEMA, + }, + path: { + schema: ACL_PATH_SCHEMA, + }, + ugid_type: { + schema: ACL_UGID_TYPE_SCHEMA, + }, + ugid: { + type: String, + description: "User or 
Group ID.", + }, + roleid: { + type: Role, + } + } +)] +#[derive(Serialize, Deserialize)] +/// ACL list entry. +pub struct AclListItem { + pub path: String, + pub ugid: String, + pub ugid_type: String, + pub propagate: bool, + pub roleid: String, +} diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs new file mode 100644 index 00000000..e20771ba --- /dev/null +++ b/pbs-api-types/src/datastore.rs @@ -0,0 +1,363 @@ +use serde::{Deserialize, Serialize}; + +use proxmox::api::api; +use proxmox::api::schema::{ + ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema, + StringSchema, +}; + +use proxmox::const_regex; + +use crate::{ + PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID, + Fingerprint, Authid, +}; + +const_regex!{ + pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); + + pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); + + pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); + + pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$"); + + pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$"; + + pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); + + pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!()); +} + +pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); + +pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .schema(); + +pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); + +pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.") + .format(&BACKUP_ID_FORMAT) + .schema(); + +pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.") + .format(&ApiStringFormat::Enum(&[ + EnumEntry::new("vm", "Virtual Machine Backup"), + EnumEntry::new("ct", 
"Container Backup"), + EnumEntry::new("host", "Host Backup"), + ])) + .schema(); + +pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)") + .minimum(1_547_797_308) + .schema(); + +pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + +pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).") + .format(&CHUNK_DIGEST_FORMAT) + .schema(); + +pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX); + +pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.") + .format(&DATASTORE_MAP_FORMAT) + .min_length(3) + .max_length(65) + .type_text("(=)?") + .schema(); + +pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new( + "Datastore mapping list.", &DATASTORE_MAP_SCHEMA) + .schema(); + +pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new( + "A list of Datastore mappings (or single datastore), comma separated. \ + For example 'a=b,e' maps the source datastore 'a' to target 'b and \ + all other sources to the default 'e'. If no default is given, only the \ + specified sources are mapped.") + .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA)) + .schema(); + +#[api( + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about a datastore. +pub struct DataStoreListItem { + pub store: String, + pub comment: Option, +} + +#[api( + properties: { + "filename": { + schema: BACKUP_ARCHIVE_NAME_SCHEMA, + }, + "crypt-mode": { + type: CryptMode, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about archive files inside a backup snapshot. 
+pub struct BackupContent { + pub filename: String, + /// Info if file is encrypted, signed, or neither. + #[serde(skip_serializing_if = "Option::is_none")] + pub crypt_mode: Option, + /// Archive size (from backup manifest). + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Result of a verify operation. +pub enum VerifyState { + /// Verification was successful + Ok, + /// Verification reported one or more errors + Failed, +} + +#[api( + properties: { + upid: { + type: UPID, + }, + state: { + type: VerifyState, + }, + }, +)] +#[derive(Serialize, Deserialize)] +/// Task properties. +pub struct SnapshotVerifyState { + /// UPID of the verify task + pub upid: UPID, + /// State of the verification. Enum. + pub state: VerifyState, +} + + +#[api( + properties: { + "backup-type": { + schema: BACKUP_TYPE_SCHEMA, + }, + "backup-id": { + schema: BACKUP_ID_SCHEMA, + }, + "backup-time": { + schema: BACKUP_TIME_SCHEMA, + }, + comment: { + schema: SINGLE_LINE_COMMENT_SCHEMA, + optional: true, + }, + verification: { + type: SnapshotVerifyState, + optional: true, + }, + fingerprint: { + type: String, + optional: true, + }, + files: { + items: { + schema: BACKUP_ARCHIVE_NAME_SCHEMA + }, + }, + owner: { + type: Authid, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about backup snapshot. 
+pub struct SnapshotListItem { + pub backup_type: String, // enum + pub backup_id: String, + pub backup_time: i64, + /// The first line from manifest "notes" + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, + /// The result of the last run verify task + #[serde(skip_serializing_if = "Option::is_none")] + pub verification: Option, + /// Fingerprint of encryption key + #[serde(skip_serializing_if = "Option::is_none")] + pub fingerprint: Option, + /// List of contained archive files. + pub files: Vec, + /// Overall snapshot size (sum of all archive sizes). + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option, + /// The owner of the snapshots group + #[serde(skip_serializing_if = "Option::is_none")] + pub owner: Option, +} + +#[api( + properties: { + "backup-type": { + schema: BACKUP_TYPE_SCHEMA, + }, + "backup-id": { + schema: BACKUP_ID_SCHEMA, + }, + "last-backup": { + schema: BACKUP_TIME_SCHEMA, + }, + "backup-count": { + type: Integer, + }, + files: { + items: { + schema: BACKUP_ARCHIVE_NAME_SCHEMA + }, + }, + owner: { + type: Authid, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about a backup group. +pub struct GroupListItem { + pub backup_type: String, // enum + pub backup_id: String, + pub last_backup: i64, + /// Number of contained snapshots + pub backup_count: u64, + /// List of contained archive files. + pub files: Vec, + /// The owner of group + #[serde(skip_serializing_if = "Option::is_none")] + pub owner: Option, + /// The first line from group "notes" + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, +} + +#[api( + properties: { + "backup-type": { + schema: BACKUP_TYPE_SCHEMA, + }, + "backup-id": { + schema: BACKUP_ID_SCHEMA, + }, + "backup-time": { + schema: BACKUP_TIME_SCHEMA, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Prune result. 
+pub struct PruneListItem { + pub backup_type: String, // enum + pub backup_id: String, + pub backup_time: i64, + /// Keep snapshot + pub keep: bool, +} + +#[api( + properties: { + ct: { + type: TypeCounts, + optional: true, + }, + host: { + type: TypeCounts, + optional: true, + }, + vm: { + type: TypeCounts, + optional: true, + }, + other: { + type: TypeCounts, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize, Default)] +/// Counts of groups/snapshots per BackupType. +pub struct Counts { + /// The counts for CT backups + pub ct: Option, + /// The counts for Host backups + pub host: Option, + /// The counts for VM backups + pub vm: Option, + /// The counts for other backup types + pub other: Option, +} + +#[api()] +#[derive(Serialize, Deserialize, Default)] +/// Backup Type group/snapshot counts. +pub struct TypeCounts { + /// The number of groups of the type. + pub groups: u64, + /// The number of snapshots of the type. + pub snapshots: u64, +} + + +pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of snapshots.", + &SnapshotListItem::API_SCHEMA, + ).schema(), +}; + +pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of archive files inside a backup snapshots.", + &BackupContent::API_SCHEMA, + ).schema(), +}; + +pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of backup groups.", + &GroupListItem::API_SCHEMA, + ).schema(), +}; + +pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of snapshots and a flag indicating if there are kept or removed.", + &PruneListItem::API_SCHEMA, + ).schema(), +}; diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 
62ded91d..59bb6f6e 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -1,10 +1,11 @@ //! Basic API types used by most of the PBS code. use serde::{Deserialize, Serialize}; +use anyhow::bail; use proxmox::api::api; use proxmox::api::schema::{ - ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema, + ApiStringFormat, ApiType, ArraySchema, IntegerSchema, ReturnType, Schema, StringSchema, }; use proxmox::const_regex; @@ -34,6 +35,12 @@ macro_rules! SNAPSHOT_PATH_REGEX_STR { ); } +mod acl; +pub use acl::*; + +mod datastore; +pub use datastore::*; + mod jobs; pub use jobs::*; @@ -73,6 +80,10 @@ pub use remote::*; mod tape; pub use tape::*; +mod zfs; +pub use zfs::*; + + #[rustfmt::skip] #[macro_use] mod local_macros { @@ -104,17 +115,7 @@ const_regex! { pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$"; - pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); - - pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); - - pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); - - pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$"); - - pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$"; - - pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); + pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ? pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; @@ -138,6 +139,7 @@ const_regex! 
{ ); pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$"; + pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$"); } pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX); @@ -146,6 +148,38 @@ pub const IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_REGEX); pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_REGEX); pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX); pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX); +pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); +pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX); +pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); +pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX); +pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); +pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX); +pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX); + +pub const DNS_ALIAS_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); + +pub const SEARCH_DOMAIN_SCHEMA: Schema = + StringSchema::new("Search domain for host-name lookup.").schema(); + +pub const FIRST_DNS_SERVER_SCHEMA: Schema = + StringSchema::new("First name server IP address.") + .format(&IP_FORMAT) + .schema(); + +pub const SECOND_DNS_SERVER_SCHEMA: Schema = + StringSchema::new("Second name server IP address.") + .format(&IP_FORMAT) + .schema(); + +pub const THIRD_DNS_SERVER_SCHEMA: Schema = + StringSchema::new("Third name server IP address.") + .format(&IP_FORMAT) + .schema(); + +pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).") + .format(&HOSTNAME_FORMAT) + 
.schema(); pub const DNS_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_REGEX); @@ -157,24 +191,48 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr .format(&DNS_NAME_OR_IP_FORMAT) .schema(); -pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.") - .format(&BACKUP_ID_FORMAT) - .schema(); -pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.") - .format(&ApiStringFormat::Enum(&[ - EnumEntry::new("vm", "Virtual Machine Backup"), - EnumEntry::new("ct", "Container Backup"), - EnumEntry::new("host", "Host Backup"), - ])) - .schema(); -pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)") - .minimum(1_547_797_308) +pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") + .format(&ApiStringFormat::VerifyFn(|node| { + if node == "localhost" || node == proxmox::tools::nodename() { + Ok(()) + } else { + bail!("no such node '{}'", node); + } + })) .schema(); -pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") - .format(&PROXMOX_SAFE_ID_FORMAT) +pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new( + "Time zone. 
The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .min_length(2) + .max_length(64) + .schema(); + +pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/).") + .format(&BLOCKDEVICE_NAME_FORMAT) .min_length(3) - .max_length(32) + .max_length(64) + .schema(); + +pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new( + "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA) + .schema(); + +pub const DISK_LIST_SCHEMA: Schema = StringSchema::new( + "A list of disk names, comma separated.") + .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA)) + .schema(); + +pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.") + .format(&PASSWORD_FORMAT) + .min_length(1) + .max_length(1024) + .schema(); + +pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.") + .format(&PASSWORD_FORMAT) + .min_length(5) + .max_length(64) .schema(); pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.") @@ -229,6 +287,16 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl .format(&SINGLE_LINE_COMMENT_FORMAT) .schema(); +pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.") + .format(&SUBSCRIPTION_KEY_FORMAT) + .min_length(15) + .max_length(16) + .schema(); + +pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.") + .max_length(256) + .schema(); + pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( "Prevent changes if current configuration file has different \ SHA256 digest. 
This can be used to prevent concurrent \ @@ -237,8 +305,6 @@ pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( .format(&PVE_CONFIG_DIGEST_FORMAT) .schema(); -pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); - /// API schema format definition for repository URLs pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX); @@ -295,219 +361,8 @@ impl Default for GarbageCollectionStatus { } } -pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); -pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); - -pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX); - -pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); - -pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.") - .format(&PROXMOX_SAFE_ID_FORMAT) - .schema(); - // Complex type definitions -#[api( - properties: { - "filename": { - schema: BACKUP_ARCHIVE_NAME_SCHEMA, - }, - "crypt-mode": { - type: CryptMode, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Basic information about archive files inside a backup snapshot. -pub struct BackupContent { - pub filename: String, - /// Info if file is encrypted, signed, or neither. - #[serde(skip_serializing_if = "Option::is_none")] - pub crypt_mode: Option, - /// Archive size (from backup manifest). - #[serde(skip_serializing_if = "Option::is_none")] - pub size: Option, -} - -#[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// Result of a verify operation. 
-pub enum VerifyState { - /// Verification was successful - Ok, - /// Verification reported one or more errors - Failed, -} - -#[api( - properties: { - upid: { - type: UPID, - }, - state: { - type: VerifyState, - }, - }, -)] -#[derive(Serialize, Deserialize)] -/// Task properties. -pub struct SnapshotVerifyState { - /// UPID of the verify task - pub upid: UPID, - /// State of the verification. Enum. - pub state: VerifyState, -} - -#[api( - properties: { - "backup-type": { - schema: BACKUP_TYPE_SCHEMA, - }, - "backup-id": { - schema: BACKUP_ID_SCHEMA, - }, - "backup-time": { - schema: BACKUP_TIME_SCHEMA, - }, - comment: { - schema: SINGLE_LINE_COMMENT_SCHEMA, - optional: true, - }, - verification: { - type: SnapshotVerifyState, - optional: true, - }, - fingerprint: { - type: String, - optional: true, - }, - files: { - items: { - schema: BACKUP_ARCHIVE_NAME_SCHEMA - }, - }, - owner: { - type: Authid, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Basic information about backup snapshot. -pub struct SnapshotListItem { - pub backup_type: String, // enum - pub backup_id: String, - pub backup_time: i64, - /// The first line from manifest "notes" - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, - /// The result of the last run verify task - #[serde(skip_serializing_if = "Option::is_none")] - pub verification: Option, - /// Fingerprint of encryption key - #[serde(skip_serializing_if = "Option::is_none")] - pub fingerprint: Option, - /// List of contained archive files. - pub files: Vec, - /// Overall snapshot size (sum of all archive sizes). 
- #[serde(skip_serializing_if = "Option::is_none")] - pub size: Option, - /// The owner of the snapshots group - #[serde(skip_serializing_if = "Option::is_none")] - pub owner: Option, -} - -#[api( - properties: { - "backup-type": { - schema: BACKUP_TYPE_SCHEMA, - }, - "backup-id": { - schema: BACKUP_ID_SCHEMA, - }, - "last-backup": { - schema: BACKUP_TIME_SCHEMA, - }, - "backup-count": { - type: Integer, - }, - files: { - items: { - schema: BACKUP_ARCHIVE_NAME_SCHEMA - }, - }, - owner: { - type: Authid, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Basic information about a backup group. -pub struct GroupListItem { - pub backup_type: String, // enum - pub backup_id: String, - pub last_backup: i64, - /// Number of contained snapshots - pub backup_count: u64, - /// List of contained archive files. - pub files: Vec, - /// The owner of group - #[serde(skip_serializing_if = "Option::is_none")] - pub owner: Option, - /// The first line from group "notes" - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, -} - -#[api( - properties: { - store: { - schema: DATASTORE_SCHEMA, - }, - comment: { - optional: true, - schema: SINGLE_LINE_COMMENT_SCHEMA, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Basic information about a datastore. -pub struct DataStoreListItem { - pub store: String, - pub comment: Option, -} - -#[api( - properties: { - "backup-type": { - schema: BACKUP_TYPE_SCHEMA, - }, - "backup-id": { - schema: BACKUP_ID_SCHEMA, - }, - "backup-time": { - schema: BACKUP_TIME_SCHEMA, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Prune result. 
-pub struct PruneListItem { - pub backup_type: String, // enum - pub backup_id: String, - pub backup_time: i64, - /// Keep snapshot - pub keep: bool, -} #[api()] #[derive(Default, Serialize, Deserialize)] @@ -521,49 +376,6 @@ pub struct StorageStatus { pub avail: u64, } -#[api()] -#[derive(Serialize, Deserialize, Default)] -/// Backup Type group/snapshot counts. -pub struct TypeCounts { - /// The number of groups of the type. - pub groups: u64, - /// The number of snapshots of the type. - pub snapshots: u64, -} - -#[api( - properties: { - ct: { - type: TypeCounts, - optional: true, - }, - host: { - type: TypeCounts, - optional: true, - }, - vm: { - type: TypeCounts, - optional: true, - }, - other: { - type: TypeCounts, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize, Default)] -/// Counts of groups/snapshots per BackupType. -pub struct Counts { - /// The counts for CT backups - pub ct: Option, - /// The counts for Host backups - pub host: Option, - /// The counts for VM backups - pub vm: Option, - /// The counts for other backup types - pub other: Option, -} - pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") .format(&SINGLE_LINE_COMMENT_FORMAT) .min_length(1) @@ -634,37 +446,6 @@ pub struct TaskListItem { pub status: Option, } -pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { - optional: false, - schema: &ArraySchema::new( - "Returns the list of snapshots.", - &SnapshotListItem::API_SCHEMA, - ).schema(), -}; - -pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType { - optional: false, - schema: &ArraySchema::new( - "Returns the list of archive files inside a backup snapshots.", - &BackupContent::API_SCHEMA, - ).schema(), -}; - -pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType { - optional: false, - schema: &ArraySchema::new( - "Returns the list of backup groups.", - &GroupListItem::API_SCHEMA, - ).schema(), -}; - -pub const 
ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType { - optional: false, - schema: &ArraySchema::new( - "Returns the list of snapshots and a flag indicating if there are kept or removed.", - &PruneListItem::API_SCHEMA, - ).schema(), -}; pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { optional: false, @@ -703,3 +484,42 @@ pub struct APTUpdateInfo { #[serde(skip_serializing_if="Option::is_none")] pub extra_info: Option, } + +#[api()] +#[derive(Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub enum RRDMode { + /// Maximum + Max, + /// Average + Average, +} + + +#[api()] +#[repr(u64)] +#[derive(Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum RRDTimeFrameResolution { + /// 1 min => last 70 minutes + Hour = 60, + /// 30 min => last 35 hours + Day = 60*30, + /// 3 hours => about 8 days + Week = 60*180, + /// 12 hours => last 35 days + Month = 60*720, + /// 1 week => last 490 days + Year = 60*10080, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Node Power command type. +pub enum NodePowerCommand { + /// Restart the server + Reboot, + /// Shutdown the server + Shutdown, +} diff --git a/pbs-api-types/src/zfs.rs b/pbs-api-types/src/zfs.rs new file mode 100644 index 00000000..517ca914 --- /dev/null +++ b/pbs-api-types/src/zfs.rs @@ -0,0 +1,81 @@ +use serde::{Deserialize, Serialize}; + +use proxmox::api::{api, schema::*}; + +use proxmox::const_regex; + +const_regex! 
{ + pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$"; +} + +pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new( + "Pool sector size exponent.") + .minimum(9) + .maximum(16) + .default(12) + .schema(); + +pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name") + .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX)) + .schema(); + +#[api(default: "On")] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// The ZFS compression algorithm to use. +pub enum ZfsCompressionType { + /// Gnu Zip + Gzip, + /// LZ4 + Lz4, + /// LZJB + Lzjb, + /// ZLE + Zle, + /// ZStd + ZStd, + /// Enable compression using the default algorithm. + On, + /// Disable compression. + Off, +} + +#[api()] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// The ZFS RAID level to use. +pub enum ZfsRaidLevel { + /// Single Disk + Single, + /// Mirror + Mirror, + /// Raid10 + Raid10, + /// RaidZ + RaidZ, + /// RaidZ2 + RaidZ2, + /// RaidZ3 + RaidZ3, +} + +#[api()] +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all="kebab-case")] +/// zpool list item +pub struct ZpoolListItem { + /// zpool name + pub name: String, + /// Health + pub health: String, + /// Total size + pub size: u64, + /// Used size + pub alloc: u64, + /// Free space + pub free: u64, + /// ZFS fragnentation level + pub frag: u64, + /// ZFS deduplication ratio + pub dedup: f64, +} From 99ac07d906054bd8bbf8090cd88c48ce4501a511 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 9 Sep 2021 13:14:28 +0200 Subject: [PATCH 036/299] cleanup User configuration: use Updater --- pbs-api-types/src/lib.rs | 5 +---- pbs-api-types/src/user.rs | 7 +++++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 59bb6f6e..03f184ea 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -61,10 +61,7 @@ pub use 
userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN #[macro_use] mod user; -pub use user::{ApiToken, User, UserWithTokens}; -pub use user::{ - EMAIL_SCHEMA, ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA, -}; +pub use user::*; pub mod upid; pub use upid::UPID; diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs index 9111ccea..8a7480ad 100644 --- a/pbs-api-types/src/user.rs +++ b/pbs-api-types/src/user.rs @@ -1,7 +1,9 @@ use serde::{Deserialize, Serialize}; use proxmox::api::api; -use proxmox::api::schema::{BooleanSchema, IntegerSchema, Schema, StringSchema}; +use proxmox::api::schema::{ + BooleanSchema, IntegerSchema, Schema, StringSchema, Updater, +}; use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA}; use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA}; @@ -171,9 +173,10 @@ impl ApiToken { }, } )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize,Deserialize,Updater)] /// User properties. pub struct User { + #[updater(skip)] pub userid: Userid, #[serde(skip_serializing_if="Option::is_none")] pub comment: Option, From 086536e3fad9e5c9516d079bafb1a624d6ef7517 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 10 Sep 2021 08:40:58 +0200 Subject: [PATCH 037/299] move datastore config to pbs_config workspace --- pbs-api-types/src/datastore.rs | 135 ++++++++++++++++++++++++++++++++- pbs-api-types/src/lib.rs | 31 +------- 2 files changed, 134 insertions(+), 32 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index e20771ba..039cd71a 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -3,14 +3,16 @@ use serde::{Deserialize, Serialize}; use proxmox::api::api; use proxmox::api::schema::{ ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema, - StringSchema, + StringSchema, Updater, }; use proxmox::const_regex; use crate::{ PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, 
SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID, - Fingerprint, Authid, + Fingerprint, Userid, Authid, + GC_SCHEDULE_SCHEMA, DATASTORE_NOTIFY_STRING_SCHEMA, PRUNE_SCHEDULE_SCHEMA, + }; const_regex!{ @@ -31,6 +33,11 @@ const_regex!{ pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); +pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") + .min_length(1) + .max_length(4096) + .schema(); + pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.") .format(&PROXMOX_SAFE_ID_FORMAT) .schema(); @@ -84,6 +91,130 @@ pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new( .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA)) .schema(); +pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = + IntegerSchema::new("Number of hourly backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = + IntegerSchema::new("Number of monthly backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = + IntegerSchema::new("Number of weekly backups to keep.") + .minimum(1) + .schema(); + +pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = + IntegerSchema::new("Number of yearly backups to keep.") + .minimum(1) + .schema(); + +#[api( + properties: { + name: { + schema: DATASTORE_SCHEMA, + }, + path: { + schema: DIR_NAME_SCHEMA, + }, + "notify-user": { + optional: true, + type: Userid, + }, + "notify": { + optional: true, + schema: DATASTORE_NOTIFY_STRING_SCHEMA, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + "gc-schedule": { + optional: true, + schema: GC_SCHEDULE_SCHEMA, + }, + "prune-schedule": { + optional: true, + schema: PRUNE_SCHEDULE_SCHEMA, + 
}, + "keep-last": { + optional: true, + schema: PRUNE_SCHEMA_KEEP_LAST, + }, + "keep-hourly": { + optional: true, + schema: PRUNE_SCHEMA_KEEP_HOURLY, + }, + "keep-daily": { + optional: true, + schema: PRUNE_SCHEMA_KEEP_DAILY, + }, + "keep-weekly": { + optional: true, + schema: PRUNE_SCHEMA_KEEP_WEEKLY, + }, + "keep-monthly": { + optional: true, + schema: PRUNE_SCHEMA_KEEP_MONTHLY, + }, + "keep-yearly": { + optional: true, + schema: PRUNE_SCHEMA_KEEP_YEARLY, + }, + "verify-new": { + description: "If enabled, all new backups will be verified right after completion.", + optional: true, + type: bool, + }, + } +)] +#[derive(Serialize,Deserialize,Updater)] +#[serde(rename_all="kebab-case")] +/// Datastore configuration properties. +pub struct DataStoreConfig { + #[updater(skip)] + pub name: String, + #[updater(skip)] + pub path: String, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub gc_schedule: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub prune_schedule: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_last: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_hourly: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_daily: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_weekly: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_monthly: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_yearly: Option, + /// If enabled, all backups will be verified right after completion. 
+ #[serde(skip_serializing_if="Option::is_none")] + pub verify_new: Option, + /// Send job email notification to this user + #[serde(skip_serializing_if="Option::is_none")] + pub notify_user: Option, + /// Send notification only for job errors + #[serde(skip_serializing_if="Option::is_none")] + pub notify: Option, +} + #[api( properties: { store: { diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 03f184ea..388bf0f7 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -5,8 +5,7 @@ use anyhow::bail; use proxmox::api::api; use proxmox::api::schema::{ - ApiStringFormat, ApiType, ArraySchema, IntegerSchema, ReturnType, Schema, - StringSchema, + ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema, }; use proxmox::const_regex; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; @@ -246,34 +245,6 @@ pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = .format(&FINGERPRINT_SHA256_FORMAT) .schema(); -pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = - IntegerSchema::new("Number of hourly backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = - IntegerSchema::new("Number of monthly backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = - IntegerSchema::new("Number of weekly backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = - IntegerSchema::new("Number of yearly backups to keep.") - .minimum(1) - .schema(); - pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); From 157d9a45e1349d792437aed37f3b16778e5a557e Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 10 Sep 2021 
09:21:27 +0200 Subject: [PATCH 038/299] move PruneOptions to pbs_api_types workspace --- pbs-api-types/src/datastore.rs | 46 ++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 039cd71a..39e44de6 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -119,6 +119,52 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .minimum(1) .schema(); +#[api( + properties: { + "keep-last": { + schema: PRUNE_SCHEMA_KEEP_LAST, + optional: true, + }, + "keep-hourly": { + schema: PRUNE_SCHEMA_KEEP_HOURLY, + optional: true, + }, + "keep-daily": { + schema: PRUNE_SCHEMA_KEEP_DAILY, + optional: true, + }, + "keep-weekly": { + schema: PRUNE_SCHEMA_KEEP_WEEKLY, + optional: true, + }, + "keep-monthly": { + schema: PRUNE_SCHEMA_KEEP_MONTHLY, + optional: true, + }, + "keep-yearly": { + schema: PRUNE_SCHEMA_KEEP_YEARLY, + optional: true, + }, + } +)] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +/// Common pruning options +pub struct PruneOptions { + #[serde(skip_serializing_if="Option::is_none")] + pub keep_last: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_hourly: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_daily: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_weekly: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_monthly: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub keep_yearly: Option, +} + #[api( properties: { name: { From 441b34e46e889042e25dc5a72b6fd54f3c3a4253 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 10 Sep 2021 12:25:32 +0200 Subject: [PATCH 039/299] more api type cleanups: avoid re-exports --- pbs-api-types/src/datastore.rs | 82 ++++++++++++++++++++++++++++ pbs-api-types/src/jobs.rs | 4 -- pbs-api-types/src/lib.rs | 98 +--------------------------------- pbs-api-types/src/upid.rs | 70 
+++++++++++++++++++++--- 4 files changed, 148 insertions(+), 106 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 39e44de6..75f82ea4 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -506,6 +506,88 @@ pub struct TypeCounts { pub snapshots: u64, } +#[api( + properties: { + "upid": { + optional: true, + type: UPID, + }, + }, +)] +#[derive(Clone, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Garbage collection status. +pub struct GarbageCollectionStatus { + pub upid: Option, + /// Number of processed index files. + pub index_file_count: usize, + /// Sum of bytes referred by index files. + pub index_data_bytes: u64, + /// Bytes used on disk. + pub disk_bytes: u64, + /// Chunks used on disk. + pub disk_chunks: usize, + /// Sum of removed bytes. + pub removed_bytes: u64, + /// Number of removed chunks. + pub removed_chunks: usize, + /// Sum of pending bytes (pending removal - kept for safety). + pub pending_bytes: u64, + /// Number of pending chunks (pending removal - kept for safety). + pub pending_chunks: usize, + /// Number of chunks marked as .bad by verify that have been removed by GC. + pub removed_bad: usize, + /// Number of chunks still marked as .bad after garbage collection. + pub still_bad: usize, +} + +impl Default for GarbageCollectionStatus { + fn default() -> Self { + GarbageCollectionStatus { + upid: None, + index_file_count: 0, + index_data_bytes: 0, + disk_bytes: 0, + disk_chunks: 0, + removed_bytes: 0, + removed_chunks: 0, + pending_bytes: 0, + pending_chunks: 0, + removed_bad: 0, + still_bad: 0, + } + } +} + +#[api( + properties: { + "gc-status": { + type: GarbageCollectionStatus, + optional: true, + }, + counts: { + type: Counts, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all="kebab-case")] +/// Overall Datastore status and useful information. +pub struct DataStoreStatus { + /// Total space (bytes). 
+ pub total: u64, + /// Used space (bytes). + pub used: u64, + /// Available space (bytes). + pub avail: u64, + /// Status of last GC + #[serde(skip_serializing_if="Option::is_none")] + pub gc_status: Option, + /// Group/Snapshot counts + #[serde(skip_serializing_if="Option::is_none")] + pub counts: Option, +} pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { optional: false, diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index a96fcc10..1526dbc4 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -17,10 +17,6 @@ const_regex!{ pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); } -pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task ID.") - .max_length(256) - .schema(); - pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 388bf0f7..6b0246f5 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -4,9 +4,7 @@ use serde::{Deserialize, Serialize}; use anyhow::bail; use proxmox::api::api; -use proxmox::api::schema::{ - ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema, -}; +use proxmox::api::schema::{ApiStringFormat, ArraySchema, Schema, StringSchema}; use proxmox::const_regex; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; @@ -63,7 +61,7 @@ mod user; pub use user::*; pub mod upid; -pub use upid::UPID; +pub use upid::*; mod crypto; pub use crypto::{CryptMode, Fingerprint}; @@ -276,58 +274,6 @@ pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( /// API schema format definition for repository URLs pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX); -#[api( - properties: { - "upid": { - optional: true, - type: UPID, - 
}, - }, -)] -#[derive(Clone, Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Garbage collection status. -pub struct GarbageCollectionStatus { - pub upid: Option, - /// Number of processed index files. - pub index_file_count: usize, - /// Sum of bytes referred by index files. - pub index_data_bytes: u64, - /// Bytes used on disk. - pub disk_bytes: u64, - /// Chunks used on disk. - pub disk_chunks: usize, - /// Sum of removed bytes. - pub removed_bytes: u64, - /// Number of removed chunks. - pub removed_chunks: usize, - /// Sum of pending bytes (pending removal - kept for safety). - pub pending_bytes: u64, - /// Number of pending chunks (pending removal - kept for safety). - pub pending_chunks: usize, - /// Number of chunks marked as .bad by verify that have been removed by GC. - pub removed_bad: usize, - /// Number of chunks still marked as .bad after garbage collection. - pub still_bad: usize, -} - -impl Default for GarbageCollectionStatus { - fn default() -> Self { - GarbageCollectionStatus { - upid: None, - index_file_count: 0, - index_data_bytes: 0, - disk_bytes: 0, - disk_chunks: 0, - removed_bytes: 0, - removed_chunks: 0, - pending_bytes: 0, - pending_chunks: 0, - removed_bad: 0, - still_bad: 0, - } - } -} // Complex type definitions @@ -383,46 +329,6 @@ impl std::convert::TryFrom> for RsaPubK } } -#[api( - properties: { - upid: { schema: UPID::API_SCHEMA }, - }, -)] -#[derive(Serialize, Deserialize)] -/// Task properties. -pub struct TaskListItem { - pub upid: String, - /// The node name where the task is running on. 
- pub node: String, - /// The Unix PID - pub pid: i64, - /// The task start time (Epoch) - pub pstart: u64, - /// The task start time (Epoch) - pub starttime: i64, - /// Worker type (arbitrary ASCII string) - pub worker_type: String, - /// Worker ID (arbitrary ASCII string) - pub worker_id: Option, - /// The authenticated entity who started the task - pub user: Authid, - /// The task end time (Epoch) - #[serde(skip_serializing_if="Option::is_none")] - pub endtime: Option, - /// Task end status - #[serde(skip_serializing_if="Option::is_none")] - pub status: Option, -} - - -pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { - optional: false, - schema: &ArraySchema::new( - "A list of tasks.", - &TaskListItem::API_SCHEMA, - ).schema(), -}; - #[api()] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "PascalCase")] diff --git a/pbs-api-types/src/upid.rs b/pbs-api-types/src/upid.rs index 9447b8a0..50d70b67 100644 --- a/pbs-api-types/src/upid.rs +++ b/pbs-api-types/src/upid.rs @@ -1,8 +1,10 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use anyhow::{bail, Error}; +use serde::{Deserialize, Serialize}; -use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema}; +use proxmox::api::api; +use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, ArraySchema, ReturnType}; use proxmox::const_regex; use proxmox::sys::linux::procfs; @@ -54,12 +56,14 @@ const_regex! 
{ pub const PROXMOX_UPID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX); +pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier") + .min_length("UPID:N:12345678:12345678:12345678:::".len()) + .max_length(128) // arbitrary + .format(&PROXMOX_UPID_FORMAT) + .schema(); + impl ApiType for UPID { - const API_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier") - .min_length("UPID:N:12345678:12345678:12345678:::".len()) - .max_length(128) // arbitrary - .format(&PROXMOX_UPID_FORMAT) - .schema(); + const API_SCHEMA: Schema = UPID_SCHEMA; } impl UPID { @@ -143,3 +147,57 @@ impl std::fmt::Display for UPID { self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id) } } + +#[api()] +#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum TaskStateType { + /// Ok + OK, + /// Warning + Warning, + /// Error + Error, + /// Unknown + Unknown, +} + +#[api( + properties: { + upid: { schema: UPID::API_SCHEMA }, + }, +)] +#[derive(Serialize, Deserialize)] +/// Task properties. +pub struct TaskListItem { + pub upid: String, + /// The node name where the task is running on. 
+ pub node: String, + /// The Unix PID + pub pid: i64, + /// The task start time (Epoch) + pub pstart: u64, + /// The task start time (Epoch) + pub starttime: i64, + /// Worker type (arbitrary ASCII string) + pub worker_type: String, + /// Worker ID (arbitrary ASCII string) + pub worker_id: Option, + /// The authenticated entity who started the task + pub user: Authid, + /// The task end time (Epoch) + #[serde(skip_serializing_if="Option::is_none")] + pub endtime: Option, + /// Task end status + #[serde(skip_serializing_if="Option::is_none")] + pub status: Option, +} + +pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "A list of tasks.", + &TaskListItem::API_SCHEMA, + ).schema(), +}; + From 1eb2dd5dacb07e5ccecba7e0746ed9e39e22f305 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 21 Sep 2021 07:58:40 +0200 Subject: [PATCH 040/299] move ApiConfig, FileLogger and CommandoSocket to proxmox-rest-server workspace ApiConfig: avoid using pbs_config::backup_user() CommandoSocket: avoid using pbs_config::backup_user() FileLogger: avoid using pbs_config::backup_user() - use atomic_open_or_create_file() Auth Trait: moved definitions to proxmox-rest-server/src/lib.rs - removed CachedUserInfo patrameter - return user as String (not Authid) Signed-off-by: Thomas Lamprecht --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 15507328..02c8c2d4 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = { version = "0.13.0", default-features = false, features = [ "api-macro" ] } +proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] } pbs-systemd = { path = "../pbs-systemd" } pbs-tools = { path = "../pbs-tools" } From 
3d428713c5cf5c399d7eb5853e53df71883e6d9a Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 21 Sep 2021 09:33:51 +0200 Subject: [PATCH 041/299] rename pbs-systemd to proxmox-systemd --- pbs-api-types/Cargo.toml | 2 +- pbs-api-types/src/jobs.rs | 8 ++++---- pbs-api-types/src/tape/media_pool.rs | 2 +- pbs-api-types/src/upid.rs | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 02c8c2d4..a64d7f0a 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -16,5 +16,5 @@ serde = { version = "1.0", features = ["derive"] } proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] } -pbs-systemd = { path = "../pbs-systemd" } +proxmox-systemd = { path = "../proxmox-systemd" } pbs-tools = { path = "../pbs-tools" } diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 1526dbc4..a9c64779 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -25,25 +25,25 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run sync job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) .type_text("") .schema(); pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run garbage collection job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) .type_text("") .schema(); pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run prune job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) .type_text("") .schema(); pub const 
VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run verify job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) .type_text("") .schema(); diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs index 53e46788..9e3d8b56 100644 --- a/pbs-api-types/src/tape/media_pool.rs +++ b/pbs-api-types/src/tape/media_pool.rs @@ -14,7 +14,7 @@ use proxmox::api::{ schema::{Schema, StringSchema, ApiStringFormat, Updater}, }; -use pbs_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan}; +use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan}; use crate::{ PROXMOX_SAFE_ID_FORMAT, diff --git a/pbs-api-types/src/upid.rs b/pbs-api-types/src/upid.rs index 50d70b67..ba23a646 100644 --- a/pbs-api-types/src/upid.rs +++ b/pbs-api-types/src/upid.rs @@ -109,7 +109,7 @@ impl std::str::FromStr for UPID { let worker_id = if cap["wid"].is_empty() { None } else { - let wid = pbs_systemd::unescape_unit(&cap["wid"])?; + let wid = proxmox_systemd::unescape_unit(&cap["wid"])?; Some(wid) }; @@ -135,7 +135,7 @@ impl std::fmt::Display for UPID { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let wid = if let Some(ref id) = self.worker_id { - pbs_systemd::escape_unit(id, false) + proxmox_systemd::escape_unit(id, false) } else { String::new() }; From ecb6b64f1848c56609740bb322247b4015998f1c Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 21 Sep 2021 12:14:19 +0200 Subject: [PATCH 042/299] src/server/worker_task.rs: Avoid using pbs-api-type::Authid Because we want to move worker_task.rs into proxmox-rest-server crate. 
--- pbs-api-types/src/upid.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/upid.rs b/pbs-api-types/src/upid.rs index ba23a646..29135bca 100644 --- a/pbs-api-types/src/upid.rs +++ b/pbs-api-types/src/upid.rs @@ -8,8 +8,6 @@ use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, Array use proxmox::const_regex; use proxmox::sys::linux::procfs; -use crate::Authid; - /// Unique Process/Task Identifier /// /// We use this to uniquely identify worker task. UPIDs have a short @@ -37,7 +35,7 @@ pub struct UPID { /// Worker ID (arbitrary ASCII string) pub worker_id: Option, /// The authenticated entity who started the task - pub auth_id: Authid, + pub auth_id: String, /// The node name. pub node: String, } @@ -71,7 +69,7 @@ impl UPID { pub fn new( worker_type: &str, worker_id: Option, - auth_id: Authid, + auth_id: String, ) -> Result { let pid = unsafe { libc::getpid() }; @@ -82,6 +80,10 @@ impl UPID { bail!("illegal characters in worker type '{}'", worker_type); } + if auth_id.contains(bad) { + bail!("illegal characters in auth_id '{}'", auth_id); + } + static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0); let task_id = WORKER_TASK_NEXT_ID.fetch_add(1, Ordering::SeqCst); @@ -184,7 +186,7 @@ pub struct TaskListItem { /// Worker ID (arbitrary ASCII string) pub worker_id: Option, /// The authenticated entity who started the task - pub user: Authid, + pub user: String, /// The task end time (Epoch) #[serde(skip_serializing_if="Option::is_none")] pub endtime: Option, @@ -200,4 +202,3 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { &TaskListItem::API_SCHEMA, ).schema(), }; - From 59c7c360e8941c99b6cd15bfd730c7aa043a432c Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 22 Sep 2021 12:46:44 +0200 Subject: [PATCH 043/299] use UPID and systemd helpers from proxmox 0.13.4 --- pbs-api-types/Cargo.toml | 2 +- pbs-api-types/src/lib.rs | 59 ++++++++++- 
pbs-api-types/src/upid.rs | 204 -------------------------------------- 3 files changed, 57 insertions(+), 208 deletions(-) delete mode 100644 pbs-api-types/src/upid.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index a64d7f0a..878d6417 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] } +proxmox = { version = "0.13.4", default-features = false, features = [ "api-macro" ] } proxmox-systemd = { path = "../proxmox-systemd" } pbs-tools = { path = "../pbs-tools" } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 6b0246f5..f7521b02 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use anyhow::bail; use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, ArraySchema, Schema, StringSchema}; +use proxmox::api::schema::{ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType}; use proxmox::const_regex; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; @@ -60,8 +60,7 @@ pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN mod user; pub use user::*; -pub mod upid; -pub use upid::*; +pub use proxmox::api::upid::*; mod crypto; pub use crypto::{CryptMode, Fingerprint}; @@ -397,3 +396,57 @@ pub enum NodePowerCommand { /// Shutdown the server Shutdown, } + + +#[api()] +#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum TaskStateType { + /// Ok + OK, + /// Warning + Warning, + /// Error + Error, + /// Unknown + Unknown, +} + +#[api( + properties: { + upid: { schema: UPID::API_SCHEMA }, + }, +)] +#[derive(Serialize, Deserialize)] +/// Task properties. 
+pub struct TaskListItem { + pub upid: String, + /// The node name where the task is running on. + pub node: String, + /// The Unix PID + pub pid: i64, + /// The task start time (Epoch) + pub pstart: u64, + /// The task start time (Epoch) + pub starttime: i64, + /// Worker type (arbitrary ASCII string) + pub worker_type: String, + /// Worker ID (arbitrary ASCII string) + pub worker_id: Option, + /// The authenticated entity who started the task + pub user: String, + /// The task end time (Epoch) + #[serde(skip_serializing_if="Option::is_none")] + pub endtime: Option, + /// Task end status + #[serde(skip_serializing_if="Option::is_none")] + pub status: Option, +} + +pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "A list of tasks.", + &TaskListItem::API_SCHEMA, + ).schema(), +}; diff --git a/pbs-api-types/src/upid.rs b/pbs-api-types/src/upid.rs deleted file mode 100644 index 29135bca..00000000 --- a/pbs-api-types/src/upid.rs +++ /dev/null @@ -1,204 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; - -use anyhow::{bail, Error}; -use serde::{Deserialize, Serialize}; - -use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, ArraySchema, ReturnType}; -use proxmox::const_regex; -use proxmox::sys::linux::procfs; - -/// Unique Process/Task Identifier -/// -/// We use this to uniquely identify worker task. UPIDs have a short -/// string repesentaion, which gives additional information about the -/// type of the task. for example: -/// ```text -/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{userid}: -/// UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam: -/// ``` -/// Please note that we use tokio, so a single thread can run multiple -/// tasks. 
-// #[api] - manually implemented API type -#[derive(Debug, Clone)] -pub struct UPID { - /// The Unix PID - pub pid: libc::pid_t, - /// The Unix process start time from `/proc/pid/stat` - pub pstart: u64, - /// The task start time (Epoch) - pub starttime: i64, - /// The task ID (inside the process/thread) - pub task_id: usize, - /// Worker type (arbitrary ASCII string) - pub worker_type: String, - /// Worker ID (arbitrary ASCII string) - pub worker_id: Option, - /// The authenticated entity who started the task - pub auth_id: String, - /// The node name. - pub node: String, -} - -proxmox::forward_serialize_to_display!(UPID); -proxmox::forward_deserialize_to_from_str!(UPID); - -const_regex! { - pub PROXMOX_UPID_REGEX = concat!( - r"^UPID:(?P[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P[0-9A-Fa-f]{8}):", - r"(?P[0-9A-Fa-f]{8,9}):(?P[0-9A-Fa-f]{8,16}):(?P[0-9A-Fa-f]{8}):", - r"(?P[^:\s]+):(?P[^:\s]*):(?P[^:\s]+):$" - ); -} - -pub const PROXMOX_UPID_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX); - -pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier") - .min_length("UPID:N:12345678:12345678:12345678:::".len()) - .max_length(128) // arbitrary - .format(&PROXMOX_UPID_FORMAT) - .schema(); - -impl ApiType for UPID { - const API_SCHEMA: Schema = UPID_SCHEMA; -} - -impl UPID { - /// Create a new UPID - pub fn new( - worker_type: &str, - worker_id: Option, - auth_id: String, - ) -> Result { - - let pid = unsafe { libc::getpid() }; - - let bad: &[_] = &['/', ':', ' ']; - - if worker_type.contains(bad) { - bail!("illegal characters in worker type '{}'", worker_type); - } - - if auth_id.contains(bad) { - bail!("illegal characters in auth_id '{}'", auth_id); - } - - static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0); - - let task_id = WORKER_TASK_NEXT_ID.fetch_add(1, Ordering::SeqCst); - - Ok(UPID { - pid, - pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime, - starttime: 
proxmox::tools::time::epoch_i64(), - task_id, - worker_type: worker_type.to_owned(), - worker_id, - auth_id, - node: proxmox::tools::nodename().to_owned(), - }) - } -} - - -impl std::str::FromStr for UPID { - type Err = Error; - - fn from_str(s: &str) -> Result { - if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) { - - let worker_id = if cap["wid"].is_empty() { - None - } else { - let wid = proxmox_systemd::unescape_unit(&cap["wid"])?; - Some(wid) - }; - - Ok(UPID { - pid: i32::from_str_radix(&cap["pid"], 16).unwrap(), - pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(), - starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(), - task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(), - worker_type: cap["wtype"].to_string(), - worker_id, - auth_id: cap["authid"].parse()?, - node: cap["node"].to_string(), - }) - } else { - bail!("unable to parse UPID '{}'", s); - } - - } -} - -impl std::fmt::Display for UPID { - - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - - let wid = if let Some(ref id) = self.worker_id { - proxmox_systemd::escape_unit(id, false) - } else { - String::new() - }; - - // Note: pstart can be > 32bit if uptime > 497 days, so this can result in - // more that 8 characters for pstart - - write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:", - self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id) - } -} - -#[api()] -#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum TaskStateType { - /// Ok - OK, - /// Warning - Warning, - /// Error - Error, - /// Unknown - Unknown, -} - -#[api( - properties: { - upid: { schema: UPID::API_SCHEMA }, - }, -)] -#[derive(Serialize, Deserialize)] -/// Task properties. -pub struct TaskListItem { - pub upid: String, - /// The node name where the task is running on. 
- pub node: String, - /// The Unix PID - pub pid: i64, - /// The task start time (Epoch) - pub pstart: u64, - /// The task start time (Epoch) - pub starttime: i64, - /// Worker type (arbitrary ASCII string) - pub worker_type: String, - /// Worker ID (arbitrary ASCII string) - pub worker_id: Option, - /// The authenticated entity who started the task - pub user: String, - /// The task end time (Epoch) - #[serde(skip_serializing_if="Option::is_none")] - pub endtime: Option, - /// Task end status - #[serde(skip_serializing_if="Option::is_none")] - pub status: Option, -} - -pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { - optional: false, - schema: &ArraySchema::new( - "A list of tasks.", - &TaskListItem::API_SCHEMA, - ).schema(), -}; From 359b00675a7af187d03c87146674908764a720af Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 28 Sep 2021 10:11:56 +0200 Subject: [PATCH 044/299] ExtJsFormatter: use ParameterError to correctly compute 'errors' By default, 'errors' is now empty. Depend on proxmox 0.13.5. --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 878d6417..1ca768de 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = { version = "0.13.4", default-features = false, features = [ "api-macro" ] } +proxmox = { version = "0.13.5", default-features = false, features = [ "api-macro" ] } proxmox-systemd = { path = "../proxmox-systemd" } pbs-tools = { path = "../pbs-tools" } From a5298b2a10496373639719d37ee7553d67bbf5bc Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Mon, 4 Oct 2021 14:58:55 +0200 Subject: [PATCH 045/299] fix deprecated use of std::u64/... 
modules integer primitive type modules are deprecated, use associated constants instead Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/acl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index 140ff58e..ad8c66a7 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -76,7 +76,7 @@ constnamedbitmap! { /// Admin always has all privileges. It can do everything except a few actions /// which are limited to the 'root@pam` superuser -pub const ROLE_ADMIN: u64 = std::u64::MAX; +pub const ROLE_ADMIN: u64 = u64::MAX; /// NoAccess can be used to remove privileges from specific (sub-)paths pub const ROLE_NO_ACCESS: u64 = 0; From 9871af7ece4041fcdfe49c202c71b993abf20236 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 6 Oct 2021 07:06:17 +0200 Subject: [PATCH 046/299] move RRD code into proxmox-rrd crate --- pbs-api-types/src/lib.rs | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index f7521b02..df068b1b 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -358,33 +358,6 @@ pub struct APTUpdateInfo { pub extra_info: Option, } -#[api()] -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "UPPERCASE")] -pub enum RRDMode { - /// Maximum - Max, - /// Average - Average, -} - - -#[api()] -#[repr(u64)] -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum RRDTimeFrameResolution { - /// 1 min => last 70 minutes - Hour = 60, - /// 30 min => last 35 hours - Day = 60*30, - /// 3 hours => about 8 days - Week = 60*180, - /// 12 hours => last 35 days - Month = 60*720, - /// 1 week => last 490 days - Year = 60*10080, -} #[api()] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] From 5b3283c5d4375069e9ea5abca233388cd655ffe6 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 6 Oct 2021 09:49:51 +0200 Subject: 
[PATCH 047/299] split out RRD api types into proxmox-rrd-api-types crate --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/lib.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 1ca768de..4bd7b136 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -16,5 +16,6 @@ serde = { version = "1.0", features = ["derive"] } proxmox = { version = "0.13.5", default-features = false, features = [ "api-macro" ] } +proxmox-rrd-api-types = { path = "../proxmox-rrd-api-types" } proxmox-systemd = { path = "../proxmox-systemd" } pbs-tools = { path = "../pbs-tools" } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index df068b1b..83ef386b 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -423,3 +423,5 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { &TaskListItem::API_SCHEMA, ).schema(), }; + +pub use proxmox_rrd_api_types::{RRDMode, RRDTimeFrameResolution}; From 1aaac3f173ae740aaa4ce9c5a603eb090fea4658 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 8 Oct 2021 11:18:22 +0200 Subject: [PATCH 048/299] bump proxmox dependency to 0.14.0 and proxmox-http to 0.5.0 Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 4bd7b136..ade37721 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = { version = "0.13.5", default-features = false, features = [ "api-macro" ] } +proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] } proxmox-rrd-api-types = { path = "../proxmox-rrd-api-types" } proxmox-systemd = { path = "../proxmox-systemd" } From d18f79dd4fb7f8096e35232fd52aac3ce129282e Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 8 Oct 2021 11:19:37 +0200 Subject: 
[PATCH 049/299] update to first proxmox crate split Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 4 ++ pbs-api-types/src/acl.rs | 30 ++++++-------- pbs-api-types/src/crypto.rs | 2 +- pbs-api-types/src/datastore.rs | 9 ++-- pbs-api-types/src/file_restore.rs | 2 +- pbs-api-types/src/jobs.rs | 4 +- pbs-api-types/src/key_derivation.rs | 2 +- pbs-api-types/src/lib.rs | 8 ++-- pbs-api-types/src/network.rs | 2 +- pbs-api-types/src/remote.rs | 2 +- pbs-api-types/src/tape/changer.rs | 52 ++++++++++-------------- pbs-api-types/src/tape/device.rs | 2 +- pbs-api-types/src/tape/drive.rs | 5 +-- pbs-api-types/src/tape/media.rs | 6 +-- pbs-api-types/src/tape/media_location.rs | 47 ++++++++------------- pbs-api-types/src/tape/media_pool.rs | 5 +-- pbs-api-types/src/tape/media_status.rs | 4 +- pbs-api-types/src/tape/mod.rs | 9 ++-- pbs-api-types/src/user.rs | 9 ++-- pbs-api-types/src/userid.rs | 6 +-- pbs-api-types/src/zfs.rs | 4 +- 21 files changed, 87 insertions(+), 127 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index ade37721..2a51bd3a 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,11 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } +proxmox = "0.14.0" +proxmox-lang = "1.0.0" proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] } +proxmox-time = "1.0.0" +proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } proxmox-rrd-api-types = { path = "../proxmox-rrd-api-types" } proxmox-systemd = { path = "../proxmox-systemd" } diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index ad8c66a7..6cdd0ee6 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -1,13 +1,12 @@ use std::str::FromStr; -use serde::{Deserialize, Serialize}; use serde::de::{value, IntoDeserializer}; +use serde::{Deserialize, Serialize}; -use proxmox::api::api; -use proxmox::api::schema::{ - ApiStringFormat, BooleanSchema, EnumEntry, Schema, 
StringSchema, +use proxmox_lang::constnamedbitmap; +use proxmox_schema::{ + api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema, }; -use proxmox::{constnamedbitmap, const_regex}; const_regex! { pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$"); @@ -222,7 +221,6 @@ pub enum Role { TapeReader = ROLE_TAPE_READER, } - impl FromStr for Role { type Err = value::Error; @@ -231,26 +229,24 @@ impl FromStr for Role { } } -pub const ACL_PATH_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&ACL_PATH_REGEX); +pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX); -pub const ACL_PATH_SCHEMA: Schema = StringSchema::new( - "Access control path.") +pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.") .format(&ACL_PATH_FORMAT) .min_length(1) .max_length(128) .schema(); -pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new( - "Allow to propagate (inherit) permissions.") - .default(true) - .schema(); +pub const ACL_PROPAGATE_SCHEMA: Schema = + BooleanSchema::new("Allow to propagate (inherit) permissions.") + .default(true) + .schema(); -pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new( - "Type of 'ugid' property.") +pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.") .format(&ApiStringFormat::Enum(&[ EnumEntry::new("user", "User"), - EnumEntry::new("group", "Group")])) + EnumEntry::new("group", "Group"), + ])) .schema(); #[api( diff --git a/pbs-api-types/src/crypto.rs b/pbs-api-types/src/crypto.rs index 7b36e85f..016970f8 100644 --- a/pbs-api-types/src/crypto.rs +++ b/pbs-api-types/src/crypto.rs @@ -3,7 +3,7 @@ use std::fmt::{self, Display}; use anyhow::Error; use serde::{Deserialize, Serialize}; -use proxmox::api::api; +use proxmox_schema::api; use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint}; diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 
75f82ea4..462081e4 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,13 +1,10 @@ use serde::{Deserialize, Serialize}; -use proxmox::api::api; -use proxmox::api::schema::{ - ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema, - StringSchema, Updater, +use proxmox_schema::{ + api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, + Schema, StringSchema, Updater, }; -use proxmox::const_regex; - use crate::{ PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID, Fingerprint, Userid, Authid, diff --git a/pbs-api-types/src/file_restore.rs b/pbs-api-types/src/file_restore.rs index eedb172b..5748f3a7 100644 --- a/pbs-api-types/src/file_restore.rs +++ b/pbs-api-types/src/file_restore.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use proxmox::api::api; +use proxmox_schema::api; #[api] #[derive(Serialize, Deserialize)] diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index a9c64779..f47a294a 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -1,8 +1,6 @@ use serde::{Deserialize, Serialize}; -use proxmox::const_regex; - -use proxmox::api::{api, schema::*}; +use proxmox_schema::*; use crate::{ Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, diff --git a/pbs-api-types/src/key_derivation.rs b/pbs-api-types/src/key_derivation.rs index 9a53130c..26b86c30 100644 --- a/pbs-api-types/src/key_derivation.rs +++ b/pbs-api-types/src/key_derivation.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use proxmox::api::api; +use proxmox_schema::api; use crate::CERT_FINGERPRINT_SHA256_SCHEMA; diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 83ef386b..cdf765a1 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -3,9 +3,9 @@ use serde::{Deserialize, Serialize}; use anyhow::bail; -use proxmox::api::api; -use 
proxmox::api::schema::{ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType}; -use proxmox::const_regex; +use proxmox_schema::{ + api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType, +}; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; #[rustfmt::skip] @@ -60,7 +60,7 @@ pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN mod user; pub use user::*; -pub use proxmox::api::upid::*; +pub use proxmox_schema::upid::*; mod crypto; pub use crypto::{CryptMode, Fingerprint}; diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index d3a8e43c..de27df7e 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use proxmox::api::{api, schema::*}; +use proxmox_schema::*; use crate::{ PROXMOX_SAFE_ID_REGEX, diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs index 2784e353..15c336b7 100644 --- a/pbs-api-types/src/remote.rs +++ b/pbs-api-types/src/remote.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use super::*; -use proxmox::api::{api, schema::*}; +use proxmox_schema::*; pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.") .format(&PASSWORD_FORMAT) diff --git a/pbs-api-types/src/tape/changer.rs b/pbs-api-types/src/tape/changer.rs index e36eb32c..c9c7fcaa 100644 --- a/pbs-api-types/src/tape/changer.rs +++ b/pbs-api-types/src/tape/changer.rs @@ -2,22 +2,11 @@ use serde::{Deserialize, Serialize}; -use proxmox::api::{ - api, - schema::{ - Schema, - ApiStringFormat, - ArraySchema, - IntegerSchema, - StringSchema, - Updater, - }, +use proxmox_schema::{ + api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater, }; -use crate::{ - PROXMOX_SAFE_ID_FORMAT, - OptionalDeviceIdentification, -}; +use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT}; pub const 
CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.") .format(&PROXMOX_SAFE_ID_FORMAT) @@ -25,9 +14,8 @@ pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifi .max_length(32) .schema(); -pub const SCSI_CHANGER_PATH_SCHEMA: Schema = StringSchema::new( - "Path to Linux generic SCSI device (e.g. '/dev/sg4')") - .schema(); +pub const SCSI_CHANGER_PATH_SCHEMA: Schema = + StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema(); pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.") .format(&PROXMOX_SAFE_ID_FORMAT) @@ -36,16 +24,18 @@ pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.") .schema(); pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new( - "Slot list.", &IntegerSchema::new("Slot number") - .minimum(1) - .schema()) - .schema(); + "Slot list.", + &IntegerSchema::new("Slot number").minimum(1).schema(), +) +.schema(); -pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new("\ +pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new( + "\ A list of slot numbers, comma separated. Those slots are reserved for Import/Export, i.e. any media in those slots are considered to be 'offline'. -") +", +) .format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA)) .schema(); @@ -63,14 +53,14 @@ Import/Export, i.e. 
any media in those slots are considered to be }, }, )] -#[derive(Serialize,Deserialize,Updater)] +#[derive(Serialize, Deserialize, Updater)] #[serde(rename_all = "kebab-case")] /// SCSI tape changer pub struct ScsiTapeChanger { #[updater(skip)] pub name: String, pub path: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub export_slots: Option, } @@ -84,7 +74,7 @@ pub struct ScsiTapeChanger { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Changer config with optional device identification attributes pub struct ChangerListEntry { @@ -95,7 +85,7 @@ pub struct ChangerListEntry { } #[api()] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Mtx Entry Kind pub enum MtxEntryKind { @@ -118,7 +108,7 @@ pub enum MtxEntryKind { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Mtx Status Entry pub struct MtxStatusEntry { @@ -126,12 +116,12 @@ pub struct MtxStatusEntry { /// The ID of the slot or drive pub entry_id: u64, /// The media label (volume tag) if the slot/drive is full - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub label_text: Option, /// The slot the drive was loaded from - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub loaded_slot: Option, /// The current state of the drive - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub state: Option, } diff --git a/pbs-api-types/src/tape/device.rs b/pbs-api-types/src/tape/device.rs index 368a0015..54fad8b0 100644 --- a/pbs-api-types/src/tape/device.rs +++ b/pbs-api-types/src/tape/device.rs @@ -1,6 +1,6 @@ use ::serde::{Deserialize, Serialize}; -use proxmox::api::api; +use proxmox_schema::api; #[api()] 
#[derive(Serialize,Deserialize)] diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index 3e207a99..e177d39f 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -4,10 +4,7 @@ use std::convert::TryFrom; use anyhow::{bail, Error}; use serde::{Deserialize, Serialize}; -use proxmox::api::{ - api, - schema::{Schema, IntegerSchema, StringSchema, Updater}, -}; +use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater}; use crate::{ PROXMOX_SAFE_ID_FORMAT, diff --git a/pbs-api-types/src/tape/media.rs b/pbs-api-types/src/tape/media.rs index 4e301c98..61d7be04 100644 --- a/pbs-api-types/src/tape/media.rs +++ b/pbs-api-types/src/tape/media.rs @@ -1,9 +1,7 @@ use ::serde::{Deserialize, Serialize}; -use proxmox::{ - api::{api, schema::*}, - tools::Uuid, -}; +use proxmox_schema::*; +use proxmox_uuid::Uuid; use crate::{ UUID_FORMAT, diff --git a/pbs-api-types/src/tape/media_location.rs b/pbs-api-types/src/tape/media_location.rs index a917c609..a05f1156 100644 --- a/pbs-api-types/src/tape/media_location.rs +++ b/pbs-api-types/src/tape/media_location.rs @@ -1,18 +1,8 @@ use anyhow::{bail, Error}; -use proxmox::api::{ - schema::{ - Schema, - StringSchema, - ApiStringFormat, - parse_simple_value, - }, -}; +use proxmox_schema::{parse_simple_value, ApiStringFormat, Schema, StringSchema}; -use crate::{ - PROXMOX_SAFE_ID_FORMAT, - CHANGER_NAME_SCHEMA, -}; +use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT}; pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.") .format(&PROXMOX_SAFE_ID_FORMAT) @@ -35,28 +25,27 @@ pub enum MediaLocation { proxmox::forward_deserialize_to_from_str!(MediaLocation); proxmox::forward_serialize_to_display!(MediaLocation); -impl proxmox::api::schema::ApiType for MediaLocation { +impl proxmox_schema::ApiType for MediaLocation { const API_SCHEMA: Schema = StringSchema::new( - "Media location (e.g. 
'offline', 'online-', 'vault-')") - .format(&ApiStringFormat::VerifyFn(|text| { - let location: MediaLocation = text.parse()?; - match location { - MediaLocation::Online(ref changer) => { - parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?; - } - MediaLocation::Vault(ref vault) => { - parse_simple_value(vault, &VAULT_NAME_SCHEMA)?; - } - MediaLocation::Offline => { /* OK */} + "Media location (e.g. 'offline', 'online-', 'vault-')", + ) + .format(&ApiStringFormat::VerifyFn(|text| { + let location: MediaLocation = text.parse()?; + match location { + MediaLocation::Online(ref changer) => { + parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?; } - Ok(()) - })) - .schema(); + MediaLocation::Vault(ref vault) => { + parse_simple_value(vault, &VAULT_NAME_SCHEMA)?; + } + MediaLocation::Offline => { /* OK */ } + } + Ok(()) + })) + .schema(); } - impl std::fmt::Display for MediaLocation { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { MediaLocation::Offline => { diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs index 9e3d8b56..148ae051 100644 --- a/pbs-api-types/src/tape/media_pool.rs +++ b/pbs-api-types/src/tape/media_pool.rs @@ -9,10 +9,7 @@ use std::str::FromStr; use anyhow::Error; use serde::{Deserialize, Serialize}; -use proxmox::api::{ - api, - schema::{Schema, StringSchema, ApiStringFormat, Updater}, -}; +use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater}; use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan}; diff --git a/pbs-api-types/src/tape/media_status.rs b/pbs-api-types/src/tape/media_status.rs index 5a3bff96..9815b91f 100644 --- a/pbs-api-types/src/tape/media_status.rs +++ b/pbs-api-types/src/tape/media_status.rs @@ -1,6 +1,6 @@ -use ::serde::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; -use proxmox::api::api; +use proxmox_schema::api; #[api()] /// Media status diff --git a/pbs-api-types/src/tape/mod.rs 
b/pbs-api-types/src/tape/mod.rs index a77be7f7..58777a52 100644 --- a/pbs-api-types/src/tape/mod.rs +++ b/pbs-api-types/src/tape/mod.rs @@ -22,13 +22,10 @@ pub use media_location::*; mod media; pub use media::*; -use ::serde::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; -use proxmox::api::api; -use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat}; -use proxmox::tools::Uuid; - -use proxmox::const_regex; +use proxmox_schema::{api, const_regex, Schema, StringSchema, ApiStringFormat}; +use proxmox_uuid::Uuid; use crate::{ FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs index 8a7480ad..94ed07c0 100644 --- a/pbs-api-types/src/user.rs +++ b/pbs-api-types/src/user.rs @@ -1,8 +1,7 @@ use serde::{Deserialize, Serialize}; -use proxmox::api::api; -use proxmox::api::schema::{ - BooleanSchema, IntegerSchema, Schema, StringSchema, Updater, +use proxmox_schema::{ + api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater, }; use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA}; @@ -133,7 +132,7 @@ impl ApiToken { return false; } if let Some(expire) = self.expire { - let now = proxmox::tools::time::epoch_i64(); + let now = proxmox_time::epoch_i64(); if expire > 0 && expire <= now { return false; } @@ -198,7 +197,7 @@ impl User { return false; } if let Some(expire) = self.expire { - let now = proxmox::tools::time::epoch_i64(); + let now = proxmox_time::epoch_i64(); if expire > 0 && expire <= now { return false; } diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 1794c720..8c58da2e 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -29,9 +29,9 @@ use anyhow::{bail, format_err, Error}; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; -use proxmox::api::api; -use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType}; -use proxmox::const_regex; +use 
proxmox_schema::{ + api, const_regex, ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType, +}; // we only allow a limited set of characters // colon is not allowed, because we store usernames in diff --git a/pbs-api-types/src/zfs.rs b/pbs-api-types/src/zfs.rs index 517ca914..5fe49561 100644 --- a/pbs-api-types/src/zfs.rs +++ b/pbs-api-types/src/zfs.rs @@ -1,8 +1,6 @@ use serde::{Deserialize, Serialize}; -use proxmox::api::{api, schema::*}; - -use proxmox::const_regex; +use proxmox_schema::*; const_regex! { pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$"; From 6e30f3433f46d5d8022d757f07c166cd692c395d Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 13 Oct 2021 10:24:44 +0200 Subject: [PATCH 050/299] remove proxmox-rrd-api-types crate, s/RRDTimeFrameResolution/RRDTimeFrame/ Because the types used inside the RRD have other requirements than the API types: - other serialization format - the API may not support all RRD features Signed-off-by: Dietmar Maurer Signed-off-by: Thomas Lamprecht --- pbs-api-types/Cargo.toml | 1 - pbs-api-types/src/lib.rs | 30 +++++++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 2a51bd3a..11644399 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -20,6 +20,5 @@ proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] } proxmox-time = "1.0.0" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } -proxmox-rrd-api-types = { path = "../proxmox-rrd-api-types" } proxmox-systemd = { path = "../proxmox-systemd" } pbs-tools = { path = "../pbs-tools" } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index cdf765a1..96ac657b 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -424,4 +424,32 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { ).schema(), }; -pub use proxmox_rrd_api_types::{RRDMode, RRDTimeFrameResolution}; +#[api()] 
+#[derive(Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +/// RRD consolidation mode +pub enum RRDMode { + /// Maximum + Max, + /// Average + Average, +} + +#[api()] +#[derive(Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// RRD time frame +pub enum RRDTimeFrame { + /// Hour + Hour, + /// Day + Day, + /// Week + Week, + /// Month + Month, + /// Year + Year, + /// Decade (10 years) + Decade, +} From d154224307983cac66c6f1318a004c73526e05f2 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 20 Oct 2021 14:56:15 +0200 Subject: [PATCH 051/299] use new fsync parameter to replace_file and atomic_open_or_create Depend on proxmox 0.15.0 and proxmox-openid 0.8.1 Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 11644399..6a30735c 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = "0.14.0" +proxmox = "0.15.0" proxmox-lang = "1.0.0" proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] } proxmox-time = "1.0.0" From ee72e63fb9e65eb8c1c4f8460512bf5774dab035 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Wed, 27 Oct 2021 13:22:28 +0200 Subject: [PATCH 052/299] add protected info of snapshots to api and task logs adds the info that a snapshot is protected to: * snapshot list * manual pruning (also dry-run) * prune jobs Signed-off-by: Dominik Csapak Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 462081e4..77c1258f 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -390,6 +390,8 @@ pub struct SnapshotListItem { /// The owner of the snapshots group #[serde(skip_serializing_if 
= "Option::is_none")] pub owner: Option, + /// Protection from prunes + pub protected: bool, } #[api( From 2b62255aca6dcb71c859754121dc1407d1e59dc1 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sat, 6 Nov 2021 18:46:58 +0100 Subject: [PATCH 053/299] Add traffic control configuration config with API Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 2 +- pbs-api-types/src/lib.rs | 7 ++ pbs-api-types/src/traffic_control.rs | 96 ++++++++++++++++++++++++++++ 3 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 pbs-api-types/src/traffic_control.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 6a30735c..840d36bd 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -16,7 +16,7 @@ serde = { version = "1.0", features = ["derive"] } proxmox = "0.15.0" proxmox-lang = "1.0.0" -proxmox-schema = { version = "1.0.0", features = [ "api-macro" ] } +proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] } proxmox-time = "1.0.0" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 96ac657b..a61de960 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -7,6 +7,7 @@ use proxmox_schema::{ api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType, }; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; +use proxmox_systemd::daily_duration::parse_daily_duration; #[rustfmt::skip] #[macro_export] @@ -73,6 +74,9 @@ pub use remote::*; mod tape; pub use tape::*; +mod traffic_control; +pub use traffic_control::*; + mod zfs; pub use zfs::*; @@ -152,6 +156,9 @@ pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_ pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); +pub const DAILY_DURATION_FORMAT: ApiStringFormat = + ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop)); + pub 
const SEARCH_DOMAIN_SCHEMA: Schema = StringSchema::new("Search domain for host-name lookup.").schema(); diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs new file mode 100644 index 00000000..0dd7ed58 --- /dev/null +++ b/pbs-api-types/src/traffic_control.rs @@ -0,0 +1,96 @@ +use serde::{Deserialize, Serialize}; + +use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater}; + +use crate::{ + CIDR_SCHEMA, DAILY_DURATION_FORMAT, + PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, +}; + +pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = StringSchema::new( + "Timeframe to specify when the rule is actice.") + .format(&DAILY_DURATION_FORMAT) + .schema(); + +pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + +pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = IntegerSchema::new( + "Rate limit (for Token bucket filter) in bytes/second.") + .minimum(100_000) + .schema(); + +pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new( + "Size of the token bucket (for Token bucket filter) in bytes.") + .minimum(1000) + .schema(); + +#[api( + properties: { + name: { + schema: TRAFFIC_CONTROL_ID_SCHEMA, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + "rate-in": { + schema: TRAFFIC_CONTROL_RATE_SCHEMA, + optional: true, + }, + "burst-in": { + schema: TRAFFIC_CONTROL_BURST_SCHEMA, + optional: true, + }, + "rate-out": { + schema: TRAFFIC_CONTROL_RATE_SCHEMA, + optional: true, + }, + "burst-out": { + schema: TRAFFIC_CONTROL_BURST_SCHEMA, + optional: true, + }, + network: { + type: Array, + items: { + schema: CIDR_SCHEMA, + }, + }, + timeframe: { + type: Array, + items: { + schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA, + }, + optional: true, + }, + }, +)] +#[derive(Serialize,Deserialize, Updater)] +#[serde(rename_all = "kebab-case")] +/// Traffic control rule +pub struct 
TrafficControlRule { + #[updater(skip)] + pub name: String, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + /// Rule applies to Source IPs within this networks + pub network: Vec, + #[serde(skip_serializing_if="Option::is_none")] + pub rate_in: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub burst_in: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub rate_out: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub burst_out: Option, + // fixme: expose this? + // /// Bandwidth is shared accross all connections + // #[serde(skip_serializing_if="Option::is_none")] + // pub shared: Option, + /// Enable the rule at specific times + #[serde(skip_serializing_if="Option::is_none")] + pub timeframe: Option>, +} From 8c1ec5c8021b11d4ef657a55c67b060045e6ebdc Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 17 Nov 2021 07:07:40 +0100 Subject: [PATCH 054/299] move fingerprint helpers from pbs-tools to pbs-api-types Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 2 +- pbs-api-types/src/crypto.rs | 42 +++++++++++++++++++++++++++++++++++-- pbs-api-types/src/lib.rs | 2 +- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 840d36bd..585bb9c6 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -7,6 +7,7 @@ description = "general API type helpers for PBS" [dependencies] anyhow = "1.0" +hex = "0.4.3" lazy_static = "1.4" libc = "0.2" nix = "0.19.1" @@ -21,4 +22,3 @@ proxmox-time = "1.0.0" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } proxmox-systemd = { path = "../proxmox-systemd" } -pbs-tools = { path = "../pbs-tools" } diff --git a/pbs-api-types/src/crypto.rs b/pbs-api-types/src/crypto.rs index 016970f8..eda92e23 100644 --- a/pbs-api-types/src/crypto.rs +++ b/pbs-api-types/src/crypto.rs @@ -5,8 +5,6 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::api; -use 
pbs_tools::format::{as_fingerprint, bytes_as_fingerprint}; - #[api(default: "encrypt")] #[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] @@ -55,3 +53,43 @@ impl std::str::FromStr for Fingerprint { } } +fn as_fingerprint(bytes: &[u8]) -> String { + hex::encode(bytes) + .as_bytes() + .chunks(2) + .map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string + .collect::>().join(":") +} + +pub mod bytes_as_fingerprint { + use std::mem::MaybeUninit; + + use serde::{Deserialize, Serializer, Deserializer}; + + pub fn serialize( + bytes: &[u8; 32], + serializer: S, + ) -> Result + where + S: Serializer, + { + let s = super::as_fingerprint(bytes); + serializer.serialize_str(&s) + } + + pub fn deserialize<'de, D>( + deserializer: D, + ) -> Result<[u8; 32], D::Error> + where + D: Deserializer<'de>, + { + // TODO: more efficiently implement with a Visitor implementing visit_str using split() and + // hex::decode by-byte + let mut s = String::deserialize(deserializer)?; + s.retain(|c| c != ':'); + let mut out = MaybeUninit::<[u8; 32]>::uninit(); + hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] 
}) + .map_err(serde::de::Error::custom)?; + Ok(unsafe { out.assume_init() }) + } +} diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index a61de960..eebf5794 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -64,7 +64,7 @@ pub use user::*; pub use proxmox_schema::upid::*; mod crypto; -pub use crypto::{CryptMode, Fingerprint}; +pub use crypto::{CryptMode, Fingerprint, bytes_as_fingerprint}; pub mod file_restore; From 4d7cb99f4a4df27f10c2ca7e2588a0421b729c5a Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 17 Nov 2021 12:29:31 +0100 Subject: [PATCH 055/299] proxmox-systemd: remove crate, use new proxmox-time 1.1.0 instead Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 4 +--- pbs-api-types/src/jobs.rs | 8 ++++---- pbs-api-types/src/lib.rs | 2 +- pbs-api-types/src/tape/media_pool.rs | 2 +- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 585bb9c6..4b188c07 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -18,7 +18,5 @@ serde = { version = "1.0", features = ["derive"] } proxmox = "0.15.0" proxmox-lang = "1.0.0" proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] } -proxmox-time = "1.0.0" +proxmox-time = "1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } - -proxmox-systemd = { path = "../proxmox-systemd" } diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index f47a294a..66a8d180 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -23,25 +23,25 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run sync job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) .type_text("") .schema(); pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run 
garbage collection job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) .type_text("") .schema(); pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run prune job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) .type_text("") .schema(); pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new( "Run verify job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event)) + .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) .type_text("") .schema(); diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index eebf5794..01c14cc4 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -7,7 +7,7 @@ use proxmox_schema::{ api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType, }; use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; -use proxmox_systemd::daily_duration::parse_daily_duration; +use proxmox_time::parse_daily_duration; #[rustfmt::skip] #[macro_export] diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs index 148ae051..3b1cb0f5 100644 --- a/pbs-api-types/src/tape/media_pool.rs +++ b/pbs-api-types/src/tape/media_pool.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater}; -use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan}; +use proxmox_time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan}; use crate::{ PROXMOX_SAFE_ID_FORMAT, From 632ab24359f64893cab6412e8f37fec3d6442a9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 
28 Oct 2021 15:00:48 +0200 Subject: [PATCH 056/299] api-types: add schema for backup group MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit the regex was already there, and we need a simple type/schema for passing in multiple groups as Vec/Array via the API. Signed-off-by: Fabian Grünbichler Reviewed-by: Dominik Csapak Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 77c1258f..b1dd09d4 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -40,6 +40,7 @@ pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive .schema(); pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); +pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX); pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.") .format(&BACKUP_ID_FORMAT) @@ -57,6 +58,10 @@ pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epo .minimum(1_547_797_308) .schema(); +pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group") + .format(&BACKUP_GROUP_FORMAT) + .schema(); + pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) From 506c106a509ab286d1a461ef15702cb2e15902ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 28 Oct 2021 15:00:49 +0200 Subject: [PATCH 057/299] api: add GroupFilter(List) type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit at the API level, this is a simple (wrapped) Vec of Strings with a verifier function. all users should use the provided helper to get the actual GroupFilter enum values, which can't be directly used in the API schema because of restrictions of the api macro. 
validation of the schema + parsing into the proper type uses the same fn intentionally to avoid running out of sync, even if it means compiling the REs twice. Signed-off-by: Fabian Grünbichler Reviewed-by: Dominik Csapak Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 56 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 66a8d180..ec7b0843 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -1,3 +1,7 @@ +use anyhow::format_err; +use std::str::FromStr; + +use regex::Regex; use serde::{Deserialize, Serialize}; use proxmox_schema::*; @@ -5,6 +9,7 @@ use proxmox_schema::*; use crate::{ Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA, + BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA, }; const_regex!{ @@ -317,6 +322,57 @@ pub struct TapeBackupJobStatus { pub next_media_label: Option, } +#[derive(Clone, Debug)] +/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`. +pub enum GroupFilter { + /// BackupGroup type - either `vm`, `ct`, or `host`. 
+ BackupType(String), + /// Full identifier of BackupGroup, including type + Group(String), + /// A regular expression matched against the full identifier of the BackupGroup + Regex(Regex), +} + +impl std::str::FromStr for GroupFilter { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + match s.split_once(":") { + Some(("group", value)) => parse_simple_value(value, &BACKUP_GROUP_SCHEMA).map(|_| GroupFilter::Group(value.to_string())), + Some(("type", value)) => parse_simple_value(value, &BACKUP_TYPE_SCHEMA).map(|_| GroupFilter::BackupType(value.to_string())), + Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)), + Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)), + None => Err(format_err!("input doesn't match expected format '|regex:REGEX>'")), + }.map_err(|err| format_err!("'{}' - {}", s, err)) + } +} + +// used for serializing below, caution! +impl std::fmt::Display for GroupFilter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type), + GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group), + GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()), + } + } +} + +proxmox::forward_deserialize_to_from_str!(GroupFilter); +proxmox::forward_serialize_to_display!(GroupFilter); + +fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> { + GroupFilter::from_str(input).map(|_| ()) +} + +pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new( + "Group filter based on group identifier ('group:GROUP'), group type ('type:'), or regex ('regex:RE').") + .format(&ApiStringFormat::VerifyFn(verify_group_filter)) + .type_text("|group:GROUP|regex:RE>") + .schema(); + +pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema(); + #[api( properties: { id: { From 
aea616987b3492ac587f28e34e948b67f7b2502d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 28 Oct 2021 15:00:54 +0200 Subject: [PATCH 058/299] sync: add group filtering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit like for manual pulls, but persisted in the sync job config and visible in the relevant GUI parts. GUI is read-only for now (and defaults to no filtering on creation), as this is a rather advanced feature that requires a complex GUI to be user-friendly (regex-freeform, type-combobox, remote group scanning + selector with additional freeform input). Signed-off-by: Fabian Grünbichler Reviewed-by: Dominik Csapak Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index ec7b0843..419bdaf7 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -403,6 +403,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group fil optional: true, schema: SYNC_SCHEDULE_SCHEMA, }, + groups: { + schema: GROUP_FILTER_LIST_SCHEMA, + optional: true, + }, } )] #[derive(Serialize,Deserialize,Clone,Updater)] @@ -422,6 +426,8 @@ pub struct SyncJobConfig { pub comment: Option, #[serde(skip_serializing_if="Option::is_none")] pub schedule: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub groups: Option>, } #[api( From c055cdb9104210849396ef82443a313741a883ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 28 Oct 2021 15:00:58 +0200 Subject: [PATCH 059/299] fix #sync.cfg/pull: don't remove by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and convert existing (manually created/edited) jobs to the previous default value of 'true'. the GUI has always set this value and defaults to 'false'. 
Signed-off-by: Fabian Grünbichler Reviewed-by: Dominik Csapak Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 419bdaf7..47da53c0 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -52,7 +52,7 @@ pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new( pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( "Delete vanished backups. This remove the local copy if the remote backup was deleted.") - .default(true) + .default(false) .schema(); #[api( From f2b4af0322c4148a0afd096c320a03fd89d74140 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 4 Nov 2021 10:56:19 +0100 Subject: [PATCH 060/299] tape backup jobs: add group filters to config/api Signed-off-by: Dominik Csapak Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 47da53c0..601d86f9 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -247,6 +247,10 @@ pub struct VerificationJobStatus { optional: true, type: Userid, }, + groups: { + schema: GROUP_FILTER_LIST_SCHEMA, + optional: true, + }, } )] #[derive(Serialize,Deserialize,Clone,Updater)] @@ -265,6 +269,8 @@ pub struct TapeBackupJobSetup { /// Send job email notification to this user #[serde(skip_serializing_if="Option::is_none")] pub notify_user: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub groups: Option>, } #[api( From ac8709e97bc5762a064b9c2b151038fd896d9f8f Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 18 Nov 2021 09:50:47 +0100 Subject: [PATCH 061/299] group filter: rename CLI/API/Config "groups" option to "group-filter" we even use that for basically all the related schema names, "groups" allone is just rather not so telling, i.e., "groups" what? 
While due to the additive nature of `group-filter` is not the best possible name for passing multiple arguments on the CLI (the web-ui can present this more UX-friendly anyway) due to possible confusion about if the filter act like AND vs OR it can be documented and even if a user is confused they still are safe on more being synced than less. Also, the original param name wasn't really _that_ better in that regards Dietmar also suggested to use singular for the CLI option, while there can be more they're passed over repeating the option, each with a single filter. Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 601d86f9..2a7e201b 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -247,7 +247,7 @@ pub struct VerificationJobStatus { optional: true, type: Userid, }, - groups: { + "group-filter": { schema: GROUP_FILTER_LIST_SCHEMA, optional: true, }, @@ -270,7 +270,7 @@ pub struct TapeBackupJobSetup { #[serde(skip_serializing_if="Option::is_none")] pub notify_user: Option, #[serde(skip_serializing_if="Option::is_none")] - pub groups: Option>, + pub group_filter: Option>, } #[api( @@ -409,7 +409,7 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group fil optional: true, schema: SYNC_SCHEDULE_SCHEMA, }, - groups: { + "group-filter": { schema: GROUP_FILTER_LIST_SCHEMA, optional: true, }, @@ -433,7 +433,7 @@ pub struct SyncJobConfig { #[serde(skip_serializing_if="Option::is_none")] pub schedule: Option, #[serde(skip_serializing_if="Option::is_none")] - pub groups: Option>, + pub group_filter: Option>, } #[api( From 799df8f345fa0d1c5f86e4c2f6740d38f3420e27 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 18 Nov 2021 11:19:44 +0100 Subject: [PATCH 062/299] openid: allow to configure scopes, prompt, ACRs and arbitrary username-claim values - no longer set prompt to 'login' 
(makes auto-login possible) - new prompt configuration - allow arbitrary username-claim values Depend on proxmox-openid 0.9.0. Signed-off-by: Dietmar Maurer --- pbs-api-types/src/lib.rs | 3 + pbs-api-types/src/openid.rs | 121 ++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 pbs-api-types/src/openid.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 01c14cc4..4247bba3 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -68,6 +68,9 @@ pub use crypto::{CryptMode, Fingerprint, bytes_as_fingerprint}; pub mod file_restore; +mod openid; +pub use openid::*; + mod remote; pub use remote::*; diff --git a/pbs-api-types/src/openid.rs b/pbs-api-types/src/openid.rs new file mode 100644 index 00000000..65967bd1 --- /dev/null +++ b/pbs-api-types/src/openid.rs @@ -0,0 +1,121 @@ +use serde::{Deserialize, Serialize}; + +use proxmox_schema::{ + api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater, +}; + +use super::{ + PROXMOX_SAFE_ID_REGEX, PROXMOX_SAFE_ID_FORMAT, REALM_ID_SCHEMA, + SINGLE_LINE_COMMENT_SCHEMA, +}; + +pub const OPENID_SCOPE_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); + +pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.") + .format(&OPENID_SCOPE_FORMAT) + .schema(); + +pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = ArraySchema::new( + "Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema(); + +pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat = + ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA); + +pub const OPENID_DEFAILT_SCOPE_LIST: &'static str = "email profile"; +pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List") + .format(&OPENID_SCOPE_LIST_FORMAT) + .default(OPENID_DEFAILT_SCOPE_LIST) + .schema(); + +pub const OPENID_ACR_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); + +pub const OPENID_ACR_SCHEMA: Schema = 
StringSchema::new("OpenID Authentication Context Class Reference.") + .format(&OPENID_SCOPE_FORMAT) + .schema(); + +pub const OPENID_ACR_ARRAY_SCHEMA: Schema = ArraySchema::new( + "Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema(); + +pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat = + ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA); + +pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List") + .format(&OPENID_ACR_LIST_FORMAT) + .schema(); + +pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new( + "Use the value of this attribute/claim as unique user name. It \ + is up to the identity provider to guarantee the uniqueness. The \ + OpenID specification only guarantees that Subject ('sub') is \ + unique. Also make sure that the user is not allowed to change that \ + attribute by himself!") + .max_length(64) + .min_length(1) + .format(&PROXMOX_SAFE_ID_FORMAT) .schema(); + +#[api( + properties: { + realm: { + schema: REALM_ID_SCHEMA, + }, + "client-key": { + optional: true, + }, + "scopes": { + schema: OPENID_SCOPE_LIST_SCHEMA, + optional: true, + }, + "acr-values": { + schema: OPENID_ACR_LIST_SCHEMA, + optional: true, + }, + prompt: { + description: "OpenID Prompt", + type: String, + format: &PROXMOX_SAFE_ID_FORMAT, + optional: true, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + autocreate: { + optional: true, + default: false, + }, + "username-claim": { + schema: OPENID_USERNAME_CLAIM_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize, Updater)] +#[serde(rename_all="kebab-case")] +/// OpenID configuration properties. 
+pub struct OpenIdRealmConfig { + #[updater(skip)] + pub realm: String, + /// OpenID Issuer Url + pub issuer_url: String, + /// OpenID Client ID + pub client_id: String, + #[serde(skip_serializing_if="Option::is_none")] + pub scopes: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub acr_values: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub prompt: Option, + /// OpenID Client Key + #[serde(skip_serializing_if="Option::is_none")] + pub client_key: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub comment: Option, + /// Automatically create users if they do not exist. + #[serde(skip_serializing_if="Option::is_none")] + pub autocreate: Option, + #[updater(skip)] + #[serde(skip_serializing_if="Option::is_none")] + pub username_claim: Option, +} From e33f41c72cee6a5e73ec11e5739d6f9100b755d8 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 18 Nov 2021 13:43:41 +0100 Subject: [PATCH 063/299] use proxmox::tools::fd::fd_change_cloexec from proxmox 0.15.3 Depend on proxmox 0.15.3 Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 4b188c07..925207e8 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -15,7 +15,7 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = "0.15.0" +proxmox = "0.15.3" proxmox-lang = "1.0.0" proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] } proxmox-time = "1.1" From adcf38948e67d505754e7bdbdc54becc3603af22 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Fri, 19 Nov 2021 13:48:38 +0100 Subject: [PATCH 064/299] move HumanByte to pbs-abi-types crate Originally-by: Dietmar Maurer Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/human_byte.rs | 50 +++++++++++++++++++++++++++++++++ pbs-api-types/src/lib.rs | 3 ++ 2 files changed, 53 insertions(+) create mode 100644 
pbs-api-types/src/human_byte.rs diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs new file mode 100644 index 00000000..a82d8fe8 --- /dev/null +++ b/pbs-api-types/src/human_byte.rs @@ -0,0 +1,50 @@ +pub struct HumanByte { + b: usize, +} +impl std::fmt::Display for HumanByte { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.b < 1024 { + return write!(f, "{} B", self.b); + } + let kb: f64 = self.b as f64 / 1024.0; + if kb < 1024.0 { + return write!(f, "{:.2} KiB", kb); + } + let mb: f64 = kb / 1024.0; + if mb < 1024.0 { + return write!(f, "{:.2} MiB", mb); + } + let gb: f64 = mb / 1024.0; + if gb < 1024.0 { + return write!(f, "{:.2} GiB", gb); + } + let tb: f64 = gb / 1024.0; + if tb < 1024.0 { + return write!(f, "{:.2} TiB", tb); + } + let pb: f64 = tb / 1024.0; + return write!(f, "{:.2} PiB", pb); + } +} +impl From for HumanByte { + fn from(v: usize) -> Self { + HumanByte { b: v } + } +} +impl From for HumanByte { + fn from(v: u64) -> Self { + HumanByte { b: v as usize } + } +} + +#[test] +fn correct_byte_convert() { + fn convert(b: usize) -> String { + HumanByte::from(b).to_string() + } + assert_eq!(convert(1023), "1023 B"); + assert_eq!(convert(1 << 10), "1.00 KiB"); + assert_eq!(convert(1 << 20), "1.00 MiB"); + assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.10 GiB"); + assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.49 PiB"); +} diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 4247bba3..a28ddafd 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -39,6 +39,9 @@ pub use acl::*; mod datastore; pub use datastore::*; +mod human_byte; +pub use human_byte::HumanByte; + mod jobs; pub use jobs::*; From 52bfb8e819f04ea4885d3e68f1e9f6c815faf26e Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Sat, 20 Nov 2021 17:32:16 +0100 Subject: [PATCH 065/299] human byte: add proper unit type and support base-10 The new SizeUnit type takes over the auto scaling logic 
and could be used on its own too. Switch the internal type of HumanByte from u64 to f64, this results in a slight reduce of usable sizes we can represent (there's no unsigned float type after all) but we support pebibyte now with quite the precision and ebibytes should be also work out ok, and that really should us have covered for a while.. Partially adapted by Dietmar's version, but split up and change so: * there's no None type, for a SizeUnit that does not makes much sense * print the unit for byte too, better consistency and one can still use as_u64() or as_f64() if they do not want/need the unit rendered * left the "From usize/u64" impls intact, just convenient to have and avoids all over the tree changes to adapt to loosing that * move auto-scaling into SizeUnit, good fit there and I could see some re-use potential for non-human-byte users in the future * impl Display for SizeUnit instead of the separate unit_str method, better usability as it can be used directly in format (with zero alloc/copy) and saw no real reason of not having that this way * switch the place where we auto-scale in HumanByte's to the new_X helpers which allows for slightly reduced code usage and simplify implementation where possible * use rounding for the precision limit algorithm. This is a stupid problem as in practices there are cases for requiring every variant: - flooring would be good for limits, better less than to much - ceiling would be good for file sizes, to less can mean ENOSPACE and user getting angry if their working value is messed with - rounding can be good for rendering benchmark, closer to reality and no real impact So going always for rounding is really not the best solution.. Some of those changes where naturally opinionated, if there's a good practical reason we can switch back (or to something completely different). 
The single thing I kept and am not _that_ happy with is being able to have fractional bytes (1.1 B or even 0.01 B), which just does not makes much sense as most of those values cannot exist at all in reality - I say most as multiple of 1/8 Byte can exists, those are bits.o Note, the precission also changed from fixed 2 to max 3 (trailing zeros stripped), while that can be nice we should see if we get a better precision limiting algorithm, e.g., directly in the printer. Rust sadly does not supports "limit to precision of 3 but avoid trailing zeros" so we'd need to adapt their Grisu based algorithm our own - way to much complexity for this though.. Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/human_byte.rs | 222 +++++++++++++++++++++++++++----- 1 file changed, 189 insertions(+), 33 deletions(-) diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index a82d8fe8..413df35d 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -1,50 +1,206 @@ -pub struct HumanByte { - b: usize, +use anyhow::{bail, Error}; + +/// Size units for byte sizes +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum SizeUnit { + Byte, + // SI (base 10) + KByte, + MByte, + GByte, + TByte, + PByte, + // IEC (base 2) + Kibi, + Mebi, + Gibi, + Tebi, + Pebi, } -impl std::fmt::Display for HumanByte { + +impl SizeUnit { + /// Returns the scaling factor + pub fn factor(&self) -> f64 { + match self { + SizeUnit::Byte => 1.0, + // SI (base 10) + SizeUnit::KByte => 1_000.0, + SizeUnit::MByte => 1_000_000.0, + SizeUnit::GByte => 1_000_000_000.0, + SizeUnit::TByte => 1_000_000_000_000.0, + SizeUnit::PByte => 1_000_000_000_000_000.0, + // IEC (base 2) + SizeUnit::Kibi => 1024.0, + SizeUnit::Mebi => 1024.0 * 1024.0, + SizeUnit::Gibi => 1024.0 * 1024.0 * 1024.0, + SizeUnit::Tebi => 1024.0 * 1024.0 * 1024.0 * 1024.0, + SizeUnit::Pebi => 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0, + } + } + + /// gets the biggest possible unit still having a value 
greater zero before the decimal point + /// 'binary' specifies if IEC (base 2) units should be used or SI (base 10) ones + pub fn auto_scale(size: f64, binary: bool) -> SizeUnit { + if binary { + let bits = 63 - (size as u64).leading_zeros(); + match bits { + 50.. => SizeUnit::Pebi, + 40..=49 => SizeUnit::Tebi, + 30..=39 => SizeUnit::Gibi, + 20..=29 => SizeUnit::Mebi, + 10..=19 => SizeUnit::Kibi, + _ => SizeUnit::Byte, + } + } else { + if size >= 1_000_000_000_000_000.0 { + SizeUnit::PByte + } else if size >= 1_000_000_000_000.0 { + SizeUnit::TByte + } else if size >= 1_000_000_000.0 { + SizeUnit::GByte + } else if size >= 1_000_000.0 { + SizeUnit::MByte + } else if size >= 1_000.0 { + SizeUnit::KByte + } else { + SizeUnit::Byte + } + } + } +} + +/// Returns the string repesentation +impl std::fmt::Display for SizeUnit { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.b < 1024 { - return write!(f, "{} B", self.b); + match self { + SizeUnit::Byte => write!(f, "B"), + // SI (base 10) + SizeUnit::KByte => write!(f, "KB"), + SizeUnit::MByte => write!(f, "MB"), + SizeUnit::GByte => write!(f, "GB"), + SizeUnit::TByte => write!(f, "TB"), + SizeUnit::PByte => write!(f, "PB"), + // IEC (base 2) + SizeUnit::Kibi => write!(f, "KiB"), + SizeUnit::Mebi => write!(f, "MiB"), + SizeUnit::Gibi => write!(f, "GiB"), + SizeUnit::Tebi => write!(f, "TiB"), + SizeUnit::Pebi => write!(f, "PiB"), } - let kb: f64 = self.b as f64 / 1024.0; - if kb < 1024.0 { - return write!(f, "{:.2} KiB", kb); + } +} + +/// Byte size which can be displayed in a human friendly way +pub struct HumanByte { + /// The siginficant value, it does not includes any factor of the `unit` + size: f64, + /// The scale/unit of the value + unit: SizeUnit, +} + +impl HumanByte { + /// Create instance with size and unit (size must be positive) + pub fn with_unit(size: f64, unit: SizeUnit) -> Result { + if size < 0.0 { + bail!("byte size may not be negative"); } - let mb: f64 = kb / 1024.0; - 
if mb < 1024.0 { - return write!(f, "{:.2} MiB", mb); - } - let gb: f64 = mb / 1024.0; - if gb < 1024.0 { - return write!(f, "{:.2} GiB", gb); - } - let tb: f64 = gb / 1024.0; - if tb < 1024.0 { - return write!(f, "{:.2} TiB", tb); - } - let pb: f64 = tb / 1024.0; - return write!(f, "{:.2} PiB", pb); + Ok(HumanByte { size, unit }) + } + + /// Create a new instance with optimal binary unit computed + pub fn new_binary(size: f64) -> Self { + let unit = SizeUnit::auto_scale(size, true); + HumanByte { size: size / unit.factor(), unit } + } + + /// Create a new instance with optimal decimal unit computed + pub fn new_decimal(size: f64) -> Self { + let unit = SizeUnit::auto_scale(size, false); + HumanByte { size: size / unit.factor(), unit } + } + + /// Returns the size as u64 number of bytes + pub fn as_u64(&self) -> u64 { + self.as_f64() as u64 + } + + /// Returns the size as f64 number of bytes + pub fn as_f64(&self) -> f64 { + self.size * self.unit.factor() + } + + /// Returns a copy with optimal binary unit computed + pub fn auto_scale_binary(self) -> Self { + HumanByte::new_binary(self.as_f64()) + } + + /// Returns a copy with optimal decimal unit computed + pub fn auto_scale_decimal(self) -> Self { + HumanByte::new_decimal(self.as_f64()) + } +} + +impl From for HumanByte { + fn from(v: u64) -> Self { + HumanByte::new_binary(v as f64) } } impl From for HumanByte { fn from(v: usize) -> Self { - HumanByte { b: v } + HumanByte::new_binary(v as f64) } } -impl From for HumanByte { - fn from(v: u64) -> Self { - HumanByte { b: v as usize } + +impl std::fmt::Display for HumanByte { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let precision = f.precision().unwrap_or(3) as f64; + let precision_factor = 1.0 * 10.0_f64.powf(precision); + // this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet + let size = ((self.size * precision_factor).round()) / precision_factor; + write!(f, "{} {}", size, self.unit) } } #[test] 
-fn correct_byte_convert() { - fn convert(b: usize) -> String { +fn test_human_byte_auto_unit_decimal() { + fn convert(b: u64) -> String { + HumanByte::new_decimal(b as f64).to_string() + } + assert_eq!(convert(987), "987 B"); + assert_eq!(convert(1022), "1.022 KB"); + assert_eq!(convert(9_000), "9 KB"); + assert_eq!(convert(1_000), "1 KB"); + assert_eq!(convert(1_000_000), "1 MB"); + assert_eq!(convert(1_000_000_000), "1 GB"); + assert_eq!(convert(1_000_000_000_000), "1 TB"); + assert_eq!(convert(1_000_000_000_000_000), "1 PB"); + + assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.182 GB"); + assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.208 GB"); + assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.802 PB"); +} + +#[test] +fn test_human_byte_auto_unit_binary() { + fn convert(b: u64) -> String { HumanByte::from(b).to_string() } - assert_eq!(convert(1023), "1023 B"); - assert_eq!(convert(1 << 10), "1.00 KiB"); - assert_eq!(convert(1 << 20), "1.00 MiB"); - assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.10 GiB"); - assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.49 PiB"); + assert_eq!(convert(987), "987 B"); + assert_eq!(convert(1022), "1022 B"); + assert_eq!(convert(9_000), "8.789 KiB"); + assert_eq!(convert(10_000_000), "9.537 MiB"); + assert_eq!(convert(10_000_000_000), "9.313 GiB"); + assert_eq!(convert(10_000_000_000_000), "9.095 TiB"); + + assert_eq!(convert(1 << 10), "1 KiB"); + assert_eq!(convert((1 << 10) * 10), "10 KiB"); + assert_eq!(convert(1 << 20), "1 MiB"); + assert_eq!(convert(1 << 30), "1 GiB"); + assert_eq!(convert(1 << 40), "1 TiB"); + assert_eq!(convert(1 << 50), "1 PiB"); + + assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.101 GiB"); + assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.125 GiB"); + assert_eq!(convert((1 << 40) + 128 * (1 << 30)), "1.125 TiB"); + assert_eq!(convert((2 << 50) + 512 * (1 << 40)), "2.5 PiB"); } From 8ced6983048f84b22aaa8b8395b1bc6c705cbace Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: 
Sat, 20 Nov 2021 18:21:33 +0100 Subject: [PATCH 066/299] human byte: add from string parser Adapted from Dietmar's v3 on pbs-devel but some changes: - reworked with a strip_suffix fn that does matching, way shorter and even easier to read IMO - make b/B byte symbol fully optional, not just for base-10 - also trim trailing whitespace for SizeUnit::Byte - simplify the FromStr impl - adapt parser unit tests such that we actually see the failed test's definition line, simplifies debugging a bit Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/human_byte.rs | 112 ++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index 413df35d..e8ea27d0 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -90,6 +90,33 @@ impl std::fmt::Display for SizeUnit { } } +/// Strips a trailing SizeUnit inclusive trailing whitespace +/// Supports both IEC and SI based scales, the B/b byte symbol is optional. 
+fn strip_unit(v: &str) -> (&str, SizeUnit) { + let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway + + let (v, binary) = match v.strip_suffix('i') { + Some(n) => (n, true), + None => (v, false), + }; + + let mut unit = SizeUnit::Byte; + (v.strip_suffix(|c: char| match c { + 'k' | 'K' if !binary => { unit = SizeUnit::KByte; true } + 'm' | 'M' if !binary => { unit = SizeUnit::MByte; true } + 'g' | 'G' if !binary => { unit = SizeUnit::GByte; true } + 't' | 'T' if !binary => { unit = SizeUnit::TByte; true } + 'p' | 'P' if !binary => { unit = SizeUnit::PByte; true } + // binary (IEC recommended) variants + 'k' | 'K' if binary => { unit = SizeUnit::Kibi; true } + 'm' | 'M' if binary => { unit = SizeUnit::Mebi; true } + 'g' | 'G' if binary => { unit = SizeUnit::Gibi; true } + 't' | 'T' if binary => { unit = SizeUnit::Tebi; true } + 'p' | 'P' if binary => { unit = SizeUnit::Pebi; true } + _ => false + }).unwrap_or(v).trim_end(), unit) +} + /// Byte size which can be displayed in a human friendly way pub struct HumanByte { /// The siginficant value, it does not includes any factor of the `unit` @@ -161,6 +188,91 @@ impl std::fmt::Display for HumanByte { } } +impl std::str::FromStr for HumanByte { + type Err = Error; + + fn from_str(v: &str) -> Result { + let (v, unit) = strip_unit(v); + HumanByte::with_unit(v.parse()?, unit) + } +} + +#[test] +fn test_human_byte_parser() -> Result<(), Error> { + assert!("-10".parse::().is_err()); // negative size + + fn do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> { + let h: HumanByte = v.parse()?; + + if h.size != size { + bail!("got unexpected size for '{}' ({} != {})", v, h.size, size); + } + if h.unit != unit { + bail!("got unexpected unit for '{}' ({:?} != {:?})", v, h.unit, unit); + } + + let new = h.to_string(); + if &new != as_str { + bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str); + } + Ok(()) + } + fn test(v: &str, size: f64, unit: SizeUnit, as_str: 
&str) -> bool { + match do_test(v, size, unit, as_str) { + Ok(_) => true, + Err(err) => { + eprintln!("{}", err); // makes debugging easier + false + } + } + } + + assert!(test("14", 14.0, SizeUnit::Byte, "14 B")); + assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B")); + assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B")); + assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B")); + assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B")); + + let h: HumanByte = "1.2345678".parse()?; + assert_eq!(&format!("{:.0}", h), "1 B"); + assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit + assert_eq!(&format!("{:.1}", h), "1.2 B"); + assert_eq!(&format!("{:.2}", h), "1.23 B"); + assert_eq!(&format!("{:.3}", h), "1.235 B"); + assert_eq!(&format!("{:.4}", h), "1.2346 B"); + assert_eq!(&format!("{:.5}", h), "1.23457 B"); + assert_eq!(&format!("{:.6}", h), "1.234568 B"); + assert_eq!(&format!("{:.7}", h), "1.2345678 B"); + assert_eq!(&format!("{:.8}", h), "1.2345678 B"); + + assert!(test("987654321", 987654321.0, SizeUnit::Byte, "987654321 B")); + + assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B")); + assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B")); + assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B")); + assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B")); + + assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB")); + assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB")); + assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB")); + + assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB")); + + assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB")); + assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB")); + + assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB")); + assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB")); + assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB")); + + assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB")); + assert!(test("2.3456MiB", 2.3456, 
SizeUnit::Mebi, "2.346 MiB")); + + assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB")); + + Ok(()) +} + #[test] fn test_human_byte_auto_unit_decimal() { fn convert(b: u64) -> String { From 8dbc29bf7a9ee1928f6709fbb06dfa3fae77a2b4 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Sat, 20 Nov 2021 18:21:53 +0100 Subject: [PATCH 067/299] human byte: make proper proxmox API type Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/human_byte.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index e8ea27d0..142a0d4e 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -1,5 +1,7 @@ use anyhow::{bail, Error}; +use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType}; + /// Size units for byte sizes #[derive(Debug, Copy, Clone, PartialEq)] pub enum SizeUnit { @@ -118,6 +120,7 @@ fn strip_unit(v: &str) -> (&str, SizeUnit) { } /// Byte size which can be displayed in a human friendly way +#[derive(Debug, Copy, Clone, UpdaterType)] pub struct HumanByte { /// The siginficant value, it does not includes any factor of the `unit` size: f64, @@ -125,6 +128,22 @@ pub struct HumanByte { unit: SizeUnit, } +fn verify_human_byte(s: &str) -> Result<(), Error> { + match s.parse::() { + Ok(_) => Ok(()), + Err(err) => bail!("byte-size parse error for '{}': {}", s, err), + } +} +impl ApiType for HumanByte { + const API_SCHEMA: Schema = StringSchema::new( + "Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", + ) + .format(&ApiStringFormat::VerifyFn(verify_human_byte)) + .min_length(1) + .max_length(64) + .schema(); +} + impl HumanByte { /// Create instance with size and unit (size must be positive) pub fn with_unit(size: f64, unit: SizeUnit) -> Result { @@ -197,6 +216,9 @@ impl std::str::FromStr for HumanByte { } } +proxmox::forward_deserialize_to_from_str!(HumanByte); 
+proxmox::forward_serialize_to_display!(HumanByte); + #[test] fn test_human_byte_parser() -> Result<(), Error> { assert!("-10".parse::().is_err()); // negative size From a70a8ef32e463063d58222844e83fa6c28b86d83 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 18 Nov 2021 08:29:22 +0100 Subject: [PATCH 068/299] use HumanByte for traffic-control config Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/traffic_control.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index 0dd7ed58..210f53ac 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater}; use crate::{ - CIDR_SCHEMA, DAILY_DURATION_FORMAT, + HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, }; @@ -38,19 +38,19 @@ pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new( schema: SINGLE_LINE_COMMENT_SCHEMA, }, "rate-in": { - schema: TRAFFIC_CONTROL_RATE_SCHEMA, + type: HumanByte, optional: true, }, "burst-in": { - schema: TRAFFIC_CONTROL_BURST_SCHEMA, + type: HumanByte, optional: true, }, "rate-out": { - schema: TRAFFIC_CONTROL_RATE_SCHEMA, + type: HumanByte, optional: true, }, "burst-out": { - schema: TRAFFIC_CONTROL_BURST_SCHEMA, + type: HumanByte, optional: true, }, network: { @@ -79,13 +79,13 @@ pub struct TrafficControlRule { /// Rule applies to Source IPs within this networks pub network: Vec, #[serde(skip_serializing_if="Option::is_none")] - pub rate_in: Option, + pub rate_in: Option, #[serde(skip_serializing_if="Option::is_none")] - pub burst_in: Option, + pub burst_in: Option, #[serde(skip_serializing_if="Option::is_none")] - pub rate_out: Option, + pub rate_out: Option, #[serde(skip_serializing_if="Option::is_none")] - pub burst_out: Option, + pub burst_out: 
Option, // fixme: expose this? // /// Bandwidth is shared accross all connections // #[serde(skip_serializing_if="Option::is_none")] From f2a761f9b1e75ed11a04b4d1f2a03356b552ec2e Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 21 Nov 2021 09:13:02 +0100 Subject: [PATCH 069/299] pbs-api-types: fix HumanByte::auto_scale --- pbs-api-types/src/human_byte.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index 142a0d4e..895f029b 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -44,13 +44,13 @@ impl SizeUnit { /// 'binary' specifies if IEC (base 2) units should be used or SI (base 10) ones pub fn auto_scale(size: f64, binary: bool) -> SizeUnit { if binary { - let bits = 63 - (size as u64).leading_zeros(); + let bits = 64 - (size as u64).leading_zeros(); match bits { - 50.. => SizeUnit::Pebi, - 40..=49 => SizeUnit::Tebi, - 30..=39 => SizeUnit::Gibi, - 20..=29 => SizeUnit::Mebi, - 10..=19 => SizeUnit::Kibi, + 51.. 
=> SizeUnit::Pebi, + 41..=50 => SizeUnit::Tebi, + 31..=40 => SizeUnit::Gibi, + 21..=30 => SizeUnit::Mebi, + 11..=20 => SizeUnit::Kibi, _ => SizeUnit::Byte, } } else { @@ -319,6 +319,7 @@ fn test_human_byte_auto_unit_binary() { fn convert(b: u64) -> String { HumanByte::from(b).to_string() } + assert_eq!(convert(0), "0 B"); assert_eq!(convert(987), "987 B"); assert_eq!(convert(1022), "1022 B"); assert_eq!(convert(9_000), "8.789 KiB"); From 9815d90136df3f3f2b7da862bcb9affb694d9c11 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 21 Nov 2021 10:20:41 +0100 Subject: [PATCH 070/299] pbs-api-types: split out type RateLimitConfig --- pbs-api-types/src/traffic_control.rs | 56 ++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index 210f53ac..a1fcb7b5 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -30,13 +30,6 @@ pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new( #[api( properties: { - name: { - schema: TRAFFIC_CONTROL_ID_SCHEMA, - }, - comment: { - optional: true, - schema: SINGLE_LINE_COMMENT_SCHEMA, - }, "rate-in": { type: HumanByte, optional: true, @@ -53,6 +46,45 @@ pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new( type: HumanByte, optional: true, }, + }, +)] +#[derive(Serialize,Deserialize,Default,Clone,Updater)] +#[serde(rename_all = "kebab-case")] +/// Rate Limit Configuration +pub struct RateLimitConfig { + #[serde(skip_serializing_if="Option::is_none")] + pub rate_in: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub burst_in: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub rate_out: Option, + #[serde(skip_serializing_if="Option::is_none")] + pub burst_out: Option, +} + +impl RateLimitConfig { + pub fn with_same_inout(rate: Option, burst: Option) -> Self { + Self { + rate_in: rate, + burst_in: burst, + rate_out: rate, + 
burst_out: burst, + } + } +} + +#[api( + properties: { + name: { + schema: TRAFFIC_CONTROL_ID_SCHEMA, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + limit: { + type: RateLimitConfig, + }, network: { type: Array, items: { @@ -78,14 +110,8 @@ pub struct TrafficControlRule { pub comment: Option, /// Rule applies to Source IPs within this networks pub network: Vec, - #[serde(skip_serializing_if="Option::is_none")] - pub rate_in: Option, - #[serde(skip_serializing_if="Option::is_none")] - pub burst_in: Option, - #[serde(skip_serializing_if="Option::is_none")] - pub rate_out: Option, - #[serde(skip_serializing_if="Option::is_none")] - pub burst_out: Option, + #[serde(flatten)] + pub limit: RateLimitConfig, // fixme: expose this? // /// Bandwidth is shared accross all connections // #[serde(skip_serializing_if="Option::is_none")] From 2984877c5e60282f49afb8dda85251647d902b82 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 21 Nov 2021 10:29:58 +0100 Subject: [PATCH 071/299] sync-job: add rate limit Signed-off-by: Dietmar Maurer --- pbs-api-types/src/jobs.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 2a7e201b..fe2383a5 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -7,7 +7,8 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, + Userid, Authid, RateLimitConfig, + REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA, BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA, }; @@ -405,6 +406,9 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group fil optional: true, schema: SINGLE_LINE_COMMENT_SCHEMA, }, + limit: { + type: RateLimitConfig, + }, schedule: { optional: true, schema: SYNC_SCHEDULE_SCHEMA, @@ -434,6 +438,8 @@ pub struct 
SyncJobConfig { pub schedule: Option, #[serde(skip_serializing_if="Option::is_none")] pub group_filter: Option>, + #[serde(flatten)] + pub limit: RateLimitConfig, } #[api( From 27e98af425235cdfd37ec6748afe6bfaea71893f Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 16 Nov 2021 10:21:05 +0100 Subject: [PATCH 072/299] set default for 'protected' flag otherwise we cannot properly parse the api return value from older versions, since that field does not exist there. fixes sync from older versions without the protected feature Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index b1dd09d4..693ddfb8 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -396,6 +396,7 @@ pub struct SnapshotListItem { #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, /// Protection from prunes + #[serde(default)] pub protected: bool, } From f6799e08af8555cc14cdcf09de5acd0736428c13 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 22 Nov 2021 08:19:09 +0100 Subject: [PATCH 073/299] Fingerprint: add new signature method commit 8c1ec5c8021b11d4ef657a55c67b060045e6ebdc introcuded a bug by using fp.to_string(). Replace this with fp.signature() which correctly returns the full fingerprint instead of the short version. 
Signed-off-by: Dietmar Maurer --- pbs-api-types/src/crypto.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/crypto.rs b/pbs-api-types/src/crypto.rs index eda92e23..6f931f8f 100644 --- a/pbs-api-types/src/crypto.rs +++ b/pbs-api-types/src/crypto.rs @@ -33,6 +33,9 @@ impl Fingerprint { pub fn bytes(&self) -> &[u8; 32] { &self.bytes } + pub fn signature(&self) -> String { + as_fingerprint(&self.bytes) + } } /// Display as short key ID From 14f389d563ed20c7655dc1513e128b36d4d04d17 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 23 Nov 2021 17:57:00 +0100 Subject: [PATCH 074/299] update to proxmox-sys 0.2 crate - imported pbs-api-types/src/common_regex.rs from old proxmox crate - use hex crate to generate/parse hex digest - remove all reference to proxmox crate (use proxmox-sys and proxmox-serde instead) Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 4 +- pbs-api-types/src/common_regex.rs | 78 ++++++++++++++++++++++++ pbs-api-types/src/crypto.rs | 3 +- pbs-api-types/src/human_byte.rs | 4 +- pbs-api-types/src/jobs.rs | 4 +- pbs-api-types/src/lib.rs | 5 +- pbs-api-types/src/remote.rs | 2 +- pbs-api-types/src/tape/media_location.rs | 4 +- pbs-api-types/src/userid.rs | 8 +-- 9 files changed, 97 insertions(+), 15 deletions(-) create mode 100644 pbs-api-types/src/common_regex.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 925207e8..3bee23ba 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -15,8 +15,10 @@ openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } -proxmox = "0.15.3" proxmox-lang = "1.0.0" proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] } +proxmox-serde = "0.1" proxmox-time = "1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } + +proxmox-sys = "0.2" # only needed foör nodename()?? 
\ No newline at end of file diff --git a/pbs-api-types/src/common_regex.rs b/pbs-api-types/src/common_regex.rs new file mode 100644 index 00000000..8fe30673 --- /dev/null +++ b/pbs-api-types/src/common_regex.rs @@ -0,0 +1,78 @@ +//! Predefined Regular Expressions +//! +//! This is a collection of useful regular expressions + +use lazy_static::lazy_static; +use regex::Regex; + +#[rustfmt::skip] +#[macro_export] +macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") } +#[rustfmt::skip] +#[macro_export] +macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") } +#[rustfmt::skip] +#[macro_export] +macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) } + +/// Returns the regular expression string to match IPv4 addresses +#[rustfmt::skip] +#[macro_export] +macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) } + +/// Returns the regular expression string to match IPv6 addresses +#[rustfmt::skip] +#[macro_export] +macro_rules! 
IPV6RE { () => (concat!(r"(?:", + r"(?:(?:", r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|", + r"(?:(?:", r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|", + r"(?:(?:(?:", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|", + r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|", + r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|", + r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|", + r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::", ")", IPV6LS32!(), r")|", + r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::", ")", IPV6H16!(), r")|", + r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::", ")))")) +} + +/// Returns the regular expression string to match IP addresses (v4 or v6) +#[rustfmt::skip] +#[macro_export] +macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) } + +/// Regular expression string to match IP addresses where IPv6 addresses require brackets around +/// them, while for IPv4 they are forbidden. +#[rustfmt::skip] +#[macro_export] +macro_rules! IPRE_BRACKET { () => ( + concat!(r"(?:", + IPV4RE!(), + r"|\[(?:", + IPV6RE!(), + r")\]", + r")")) +} + +lazy_static! 
{ + pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap(); + pub static ref IP_BRACKET_REGEX: Regex = + Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap(); + pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap(); + pub static ref SYSTEMD_DATETIME_REGEX: Regex = + Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap(); +} + +#[test] +fn test_regexes() { + assert!(IP_REGEX.is_match("127.0.0.1")); + assert!(IP_REGEX.is_match("::1")); + assert!(IP_REGEX.is_match("2014:b3a::27")); + assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1")); + assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF")); + + assert!(IP_BRACKET_REGEX.is_match("127.0.0.1")); + assert!(IP_BRACKET_REGEX.is_match("[::1]")); + assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]")); + assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]")); + assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]")); +} diff --git a/pbs-api-types/src/crypto.rs b/pbs-api-types/src/crypto.rs index 6f931f8f..bd817034 100644 --- a/pbs-api-types/src/crypto.rs +++ b/pbs-api-types/src/crypto.rs @@ -51,7 +51,8 @@ impl std::str::FromStr for Fingerprint { fn from_str(s: &str) -> Result { let mut tmp = s.to_string(); tmp.retain(|c| c != ':'); - let bytes = proxmox::tools::hex_to_digest(&tmp)?; + let mut bytes = [0u8; 32]; + hex::decode_to_slice(&tmp, &mut bytes)?; Ok(Fingerprint::new(bytes)) } } diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index 895f029b..e5969875 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -216,8 +216,8 @@ impl std::str::FromStr for HumanByte { } } -proxmox::forward_deserialize_to_from_str!(HumanByte); -proxmox::forward_serialize_to_display!(HumanByte); +proxmox_serde::forward_deserialize_to_from_str!(HumanByte); +proxmox_serde::forward_serialize_to_display!(HumanByte); #[test] fn test_human_byte_parser() -> Result<(), Error> { 
diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index fe2383a5..d6479a98 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -365,8 +365,8 @@ impl std::fmt::Display for GroupFilter { } } -proxmox::forward_deserialize_to_from_str!(GroupFilter); -proxmox::forward_serialize_to_display!(GroupFilter); +proxmox_serde::forward_deserialize_to_from_str!(GroupFilter); +proxmox_serde::forward_serialize_to_display!(GroupFilter); fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> { GroupFilter::from_str(input).map(|_| ()) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index a28ddafd..bb5d152f 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -3,10 +3,11 @@ use serde::{Deserialize, Serialize}; use anyhow::bail; +pub mod common_regex; + use proxmox_schema::{ api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType, }; -use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE}; use proxmox_time::parse_daily_duration; #[rustfmt::skip] @@ -199,7 +200,7 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") .format(&ApiStringFormat::VerifyFn(|node| { - if node == "localhost" || node == proxmox::tools::nodename() { + if node == "localhost" || node == proxmox_sys::nodename() { Ok(()) } else { bail!("no such node '{}'", node); diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs index 15c336b7..b7cee5d3 100644 --- a/pbs-api-types/src/remote.rs +++ b/pbs-api-types/src/remote.rs @@ -79,7 +79,7 @@ pub struct Remote { pub name: String, // Note: The stored password is base64 encoded #[serde(skip_serializing_if="String::is_empty")] - #[serde(with = "proxmox::tools::serde::string_as_base64")] + #[serde(with = "proxmox_serde::string_as_base64")] pub password: String, #[serde(flatten)] pub config: RemoteConfig, diff --git 
a/pbs-api-types/src/tape/media_location.rs b/pbs-api-types/src/tape/media_location.rs index a05f1156..496bd000 100644 --- a/pbs-api-types/src/tape/media_location.rs +++ b/pbs-api-types/src/tape/media_location.rs @@ -22,8 +22,8 @@ pub enum MediaLocation { Vault(String), } -proxmox::forward_deserialize_to_from_str!(MediaLocation); -proxmox::forward_serialize_to_display!(MediaLocation); +proxmox_serde::forward_deserialize_to_from_str!(MediaLocation); +proxmox_serde::forward_serialize_to_display!(MediaLocation); impl proxmox_schema::ApiType for MediaLocation { const API_SCHEMA: Schema = StringSchema::new( diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 8c58da2e..60137d2a 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -685,8 +685,8 @@ fn test_token_id() { assert_eq!(auth_id.to_string(), "test@pam!bar".to_string()); } -proxmox::forward_deserialize_to_from_str!(Userid); -proxmox::forward_serialize_to_display!(Userid); +proxmox_serde::forward_deserialize_to_from_str!(Userid); +proxmox_serde::forward_serialize_to_display!(Userid); -proxmox::forward_deserialize_to_from_str!(Authid); -proxmox::forward_serialize_to_display!(Authid); +proxmox_serde::forward_deserialize_to_from_str!(Authid); +proxmox_serde::forward_serialize_to_display!(Authid); From 807e4743984a4b355d2c43d8f8504aa3a2357f7b Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 25 Nov 2021 11:48:52 +0100 Subject: [PATCH 075/299] move pbs-tools/src/percent_encoding.rs to pbs-api-types/src/percent_encoding.rs Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/lib.rs | 1 + pbs-api-types/src/percent_encoding.rs | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 pbs-api-types/src/percent_encoding.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 3bee23ba..cd5dc129 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -12,6 +12,7 @@ lazy_static = "1.4" 
libc = "0.2" nix = "0.19.1" openssl = "0.10" +percent-encoding = "2.1" regex = "1.2" serde = { version = "1.0", features = ["derive"] } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index bb5d152f..2ecd5170 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; use anyhow::bail; pub mod common_regex; +pub mod percent_encoding; use proxmox_schema::{ api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType, diff --git a/pbs-api-types/src/percent_encoding.rs b/pbs-api-types/src/percent_encoding.rs new file mode 100644 index 00000000..afe011e2 --- /dev/null +++ b/pbs-api-types/src/percent_encoding.rs @@ -0,0 +1,22 @@ +use percent_encoding::{utf8_percent_encode, AsciiSet}; + +/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}` +pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f and 7e + // The SIMPLE_ENCODE_SET adds space and anything >= 0x7e (7e itself is already included above) + .add(0x20) + .add(0x7f) + // the DEFAULT_ENCODE_SET added: + .add(b' ') + .add(b'"') + .add(b'#') + .add(b'<') + .add(b'>') + .add(b'`') + .add(b'?') + .add(b'{') + .add(b'}'); + +/// percent encode a url component +pub fn percent_encode_component(comp: &str) -> String { + utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string() +} From 7df207b52c97e3c346eee1d695551969e41ab7ce Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 25 Nov 2021 13:15:35 +0100 Subject: [PATCH 076/299] fix typo in comment Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index cd5dc129..e56269b0 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -22,4 +22,4 @@ proxmox-serde = "0.1" proxmox-time = "1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] 
} -proxmox-sys = "0.2" # only needed foör nodename()?? \ No newline at end of file +proxmox-sys = "0.2" # only needed for nodename()?? From 7c04f0752584294cf9352e74979175c2d10b68cf Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 30 Nov 2021 13:12:09 +0100 Subject: [PATCH 077/299] remove use of deprecated functions from proxmox-time Depend on proxmox-time 1.1.1 Signed-off-by: Dominik Csapak Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 2 +- pbs-api-types/src/tape/media_pool.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index e56269b0..44e60d57 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -19,7 +19,7 @@ serde = { version = "1.0", features = ["derive"] } proxmox-lang = "1.0.0" proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] } proxmox-serde = "0.1" -proxmox-time = "1.1" +proxmox-time = "1.1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } proxmox-sys = "0.2" # only needed for nodename()?? 
diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs index 3b1cb0f5..c0cba2bd 100644 --- a/pbs-api-types/src/tape/media_pool.rs +++ b/pbs-api-types/src/tape/media_pool.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater}; -use proxmox_time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan}; +use proxmox_time::{CalendarEvent, TimeSpan}; use crate::{ PROXMOX_SAFE_ID_FORMAT, @@ -62,7 +62,7 @@ impl std::str::FromStr for MediaSetPolicy { return Ok(MediaSetPolicy::AlwaysCreate); } - let event = parse_calendar_event(s)?; + let event = s.parse()?; Ok(MediaSetPolicy::CreateAt(event)) } @@ -97,7 +97,7 @@ impl std::str::FromStr for RetentionPolicy { return Ok(RetentionPolicy::KeepForever); } - let time_span = parse_time_span(s)?; + let time_span = s.parse()?; Ok(RetentionPolicy::ProtectFor(time_span)) } From a717b137331ccfcb5c397a0ad2f675b60fbc2537 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 1 Dec 2021 09:08:25 +0100 Subject: [PATCH 078/299] pbs-api-types: removbe usused nix dependency Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 44e60d57..23b40c35 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -10,7 +10,6 @@ anyhow = "1.0" hex = "0.4.3" lazy_static = "1.4" libc = "0.2" -nix = "0.19.1" openssl = "0.10" percent-encoding = "2.1" regex = "1.2" From 5d570c4b5974b7588ad13f20db39f597cf26c052 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 1 Dec 2021 09:10:25 +0100 Subject: [PATCH 079/299] pbs-api-types: remove libc dependency Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 1 - pbs-api-types/src/network.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 23b40c35..50746c79 100644 --- 
a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -9,7 +9,6 @@ description = "general API type helpers for PBS" anyhow = "1.0" hex = "0.4.3" lazy_static = "1.4" -libc = "0.2" openssl = "0.10" percent-encoding = "2.1" regex = "1.2" diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index de27df7e..f2af3e6f 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -127,7 +127,7 @@ pub enum NetworkInterfaceType { pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.") .format(&NETWORK_INTERFACE_FORMAT) .min_length(1) - .max_length(libc::IFNAMSIZ-1) + .max_length(15) // libc::IFNAMSIZ-1 .schema(); pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new( From 4dac5642155a7a8ae29805763b9450ae5a630548 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 1 Dec 2021 09:28:47 +0100 Subject: [PATCH 080/299] pbs-api-types: remove openssl dependency for target wasm Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 4 +++- pbs-api-types/src/lib.rs | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 50746c79..d295893e 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -9,7 +9,6 @@ description = "general API type helpers for PBS" anyhow = "1.0" hex = "0.4.3" lazy_static = "1.4" -openssl = "0.10" percent-encoding = "2.1" regex = "1.2" serde = { version = "1.0", features = ["derive"] } @@ -21,3 +20,6 @@ proxmox-time = "1.1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } proxmox-sys = "0.2" # only needed for nodename()?? 
+ +[target.'cfg(not(target_arch="wasm32"))'.dependencies] +openssl = "0.10" diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 2ecd5170..56a80313 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -326,6 +326,7 @@ pub struct RsaPubKeyInfo { pub length: usize, } +#[cfg(not(target_arch="wasm32"))] impl std::convert::TryFrom> for RsaPubKeyInfo { type Error = anyhow::Error; From 8995e899c801dc9f1a585ece38d3a08e63eb5789 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 1 Dec 2021 09:49:52 +0100 Subject: [PATCH 081/299] pbs-api-types: remove proxmox-sys dependency for target wasm Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 3 +-- pbs-api-types/src/lib.rs | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index d295893e..9b1461a9 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -19,7 +19,6 @@ proxmox-serde = "0.1" proxmox-time = "1.1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } -proxmox-sys = "0.2" # only needed for nodename()?? - [target.'cfg(not(target_arch="wasm32"))'.dependencies] +proxmox-sys = "0.2" # only needed for nodename()?? 
openssl = "0.10" diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 56a80313..0a0dd33d 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -199,6 +199,7 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr .format(&DNS_NAME_OR_IP_FORMAT) .schema(); +#[cfg(not(target_arch="wasm32"))] // this only makes sense for the serever side pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") .format(&ApiStringFormat::VerifyFn(|node| { if node == "localhost" || node == proxmox_sys::nodename() { From 4ca47bc325e0d236d59d4f006314e6c4bb6877f6 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 15 Dec 2021 14:13:46 +0100 Subject: [PATCH 082/299] fix #3794: api types: set backup time lower limit to 1 Some users want to import historical backups but they run into the original lower backuo-time limit one can pass. That original limit was derived from the initial PBS development start in 2019, it was assumed that no older backup can exist with PBS before it existing, but imports of older backups is a legitimate thing. I pondered using 683071200 (1991-08-25), aka the first time Linux was publicly announced by Linus Torvalds as new limit but at the end I did not wanted to risk that and backup software is IMO to serious for such easter eggs, so I went for 1, to differ between the bogus 0 some tools fallback too if there's something off with time. 
Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 693ddfb8..36279b3a 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -55,7 +55,7 @@ pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.") .schema(); pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)") - .minimum(1_547_797_308) + .minimum(1) .schema(); pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group") From da2e372b19f0963c5d57e969b6b6056cbc9a8c6f Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 16 Dec 2021 11:02:53 +0100 Subject: [PATCH 083/299] cleanup schema function calls Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/jobs.rs | 4 ++-- pbs-api-types/src/tape/media_location.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index d6479a98..c6664104 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -345,8 +345,8 @@ impl std::str::FromStr for GroupFilter { fn from_str(s: &str) -> Result { match s.split_once(":") { - Some(("group", value)) => parse_simple_value(value, &BACKUP_GROUP_SCHEMA).map(|_| GroupFilter::Group(value.to_string())), - Some(("type", value)) => parse_simple_value(value, &BACKUP_TYPE_SCHEMA).map(|_| GroupFilter::BackupType(value.to_string())), + Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())), + Some(("type", value)) => BACKUP_TYPE_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::BackupType(value.to_string())), Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)), Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)), None => Err(format_err!("input doesn't match expected format 
'|regex:REGEX>'")), diff --git a/pbs-api-types/src/tape/media_location.rs b/pbs-api-types/src/tape/media_location.rs index 496bd000..b81ea9a8 100644 --- a/pbs-api-types/src/tape/media_location.rs +++ b/pbs-api-types/src/tape/media_location.rs @@ -1,6 +1,6 @@ use anyhow::{bail, Error}; -use proxmox_schema::{parse_simple_value, ApiStringFormat, Schema, StringSchema}; +use proxmox_schema::{ApiStringFormat, Schema, StringSchema}; use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT}; @@ -33,10 +33,10 @@ impl proxmox_schema::ApiType for MediaLocation { let location: MediaLocation = text.parse()?; match location { MediaLocation::Online(ref changer) => { - parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?; + CHANGER_NAME_SCHEMA.parse_simple_value(changer)?; } MediaLocation::Vault(ref vault) => { - parse_simple_value(vault, &VAULT_NAME_SCHEMA)?; + VAULT_NAME_SCHEMA.parse_simple_value(vault)?; } MediaLocation::Offline => { /* OK */ } } From 039d3374d75ac487afb8a097519a200acee02dc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 30 Dec 2021 12:57:37 +0100 Subject: [PATCH 084/299] tree-wide: fix needless borrows MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit found and fixed via clippy Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/userid.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 60137d2a..7ee64fb0 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -556,10 +556,7 @@ impl Authid { } pub fn tokenname(&self) -> Option<&TokennameRef> { - match &self.tokenname { - Some(name) => Some(&name), - None => None, - } + self.tokenname.as_deref() } /// Get the "root@pam" auth id. 
From fb3fe5561e9683cfbf92cdf61446912ad5ce2994 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 30 Dec 2021 13:55:54 +0100 Subject: [PATCH 085/299] use schema verify methods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit the old, deprecated ones only forward to these anyway. Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 9b1461a9..b40a707c 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ regex = "1.2" serde = { version = "1.0", features = ["derive"] } proxmox-lang = "1.0.0" -proxmox-schema = { version = "1.0.1", features = [ "api-macro" ] } +proxmox-schema = { version = "1.1", features = [ "api-macro" ] } proxmox-serde = "0.1" proxmox-time = "1.1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } From 588001cf8d46a4ff27ed8c6f945e0f8beea06948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Wed, 12 Jan 2022 14:52:09 +0100 Subject: [PATCH 086/299] api-types: move RsaPubKeyInfo to pbs-client MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit it's the only thing requiring openssl in pbs-api-types, and it's only used by the client to pretty-print the 'master' key, which is client-specific. Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 1 - pbs-api-types/src/lib.rs | 33 --------------------------------- 2 files changed, 34 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index b40a707c..09107ace 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -21,4 +21,3 @@ proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } [target.'cfg(not(target_arch="wasm32"))'.dependencies] proxmox-sys = "0.2" # only needed for nodename()?? 
-openssl = "0.10" diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 0a0dd33d..26bef33d 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -312,39 +312,6 @@ pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") .schema(); -#[api] -#[derive(Deserialize, Serialize)] -/// RSA public key information -pub struct RsaPubKeyInfo { - /// Path to key (if stored in a file) - #[serde(skip_serializing_if="Option::is_none")] - pub path: Option, - /// RSA exponent - pub exponent: String, - /// Hex-encoded RSA modulus - pub modulus: String, - /// Key (modulus) length in bits - pub length: usize, -} - -#[cfg(not(target_arch="wasm32"))] -impl std::convert::TryFrom> for RsaPubKeyInfo { - type Error = anyhow::Error; - - fn try_from(value: openssl::rsa::Rsa) -> Result { - let modulus = value.n().to_hex_str()?.to_string(); - let exponent = value.e().to_dec_str()?.to_string(); - let length = value.size() as usize * 8; - - Ok(Self { - path: None, - exponent, - modulus, - length, - }) - } -} - #[api()] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "PascalCase")] From d069c91e73448337568817340551ff1f79f18972 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Wed, 12 Jan 2022 14:52:10 +0100 Subject: [PATCH 087/299] api-types: relax NODENAME_SCHEMA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit there isn't really a concept of 'nodes' in PBS (yet) anyway - and if there ever is, it needs to be handled by the rest-server / specific API endpoints (like in PVE), and not by the schema. this allows dropping proxmox-sys from pbs-api-types (and thus nix and some other transitive deps as well). 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 3 --- pbs-api-types/src/lib.rs | 11 ++--------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 09107ace..e77d8bc4 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -18,6 +18,3 @@ proxmox-schema = { version = "1.1", features = [ "api-macro" ] } proxmox-serde = "0.1" proxmox-time = "1.1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } - -[target.'cfg(not(target_arch="wasm32"))'.dependencies] -proxmox-sys = "0.2" # only needed for nodename()?? diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 26bef33d..e3c3df11 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -1,7 +1,6 @@ //! Basic API types used by most of the PBS code. use serde::{Deserialize, Serialize}; -use anyhow::bail; pub mod common_regex; pub mod percent_encoding; @@ -199,15 +198,9 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr .format(&DNS_NAME_OR_IP_FORMAT) .schema(); -#[cfg(not(target_arch="wasm32"))] // this only makes sense for the serever side + pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") - .format(&ApiStringFormat::VerifyFn(|node| { - if node == "localhost" || node == proxmox_sys::nodename() { - Ok(()) - } else { - bail!("no such node '{}'", node); - } - })) + .format(&HOSTNAME_FORMAT) .schema(); pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new( From a8d5bc32ca7082ce74b395545b6b1dd08dfa1df6 Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Tue, 11 Jan 2022 12:39:35 +0100 Subject: [PATCH 088/299] config: add tls ciphers to NodeConfig for TLS 1.3 and for TLS <= 1.2 Signed-off-by: Hannes Laimer --- pbs-api-types/src/lib.rs | 41 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index e3c3df11..4ef8eea1 100644 --- 
a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -99,6 +99,20 @@ mod local_macros { macro_rules! DNS_ALIAS_NAME { () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")")) } + macro_rules! OPENSSL_CIPHERSUITE_RE { + () => ( + r"TLS_AES_256_GCM_SHA384|TLS_CHACHA20_POLY1305_SHA256|TLS_AES_128_GCM_SHA256|TLS_AES_128_CCM_8_SHA256|TLS_AES_128_CCM_SHA256" + ) + } + macro_rules! OPENSSL_CIPHER_STRING_RE { + () => (concat!( + r"([!\-+]?(COMPLEMENTOFDEFAULT|ALL|COMPLEMENTOFALL|HIGH|MEDIUM|LOW|[ae]?NULL|[ka]?RSA|", + "kDH[rdE]?|kEDH|DHE?|EDH|ADH|kEECDH|kECDHE|ECDH|ECDHE|EECDH|AECDH|a?DSS|aDH|a?ECDSA|", + "SSLv3|AES(128|256)?|GCM|AESGCM|AESCCM|AESCCM8|ARIA(128|256)?|CAMELLIA(128|256)?|", + "CHACHA20|3?DES|RC[24]|IDEA|SEED|MD5|SHA(1|256|384)?|aGOST(01)?|kGOST|GOST94|GOST89MAC|", + "[ak]?PSK|kECDHEPSK|kDHEPSK|kRSAPSK|SUITEB(128|128ONLY|192)?|CBC3?|POLY1305))+" + )) + } } const_regex! { @@ -123,6 +137,22 @@ const_regex! { pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; + pub OPENSSL_CIPHERS_TLS_1_2_REGEX = concat!( + r"^((", + OPENSSL_CIPHER_STRING_RE!(), + ")([: ,](", + OPENSSL_CIPHER_STRING_RE!(), + "))*)$" + ); + + pub OPENSSL_CIPHERS_TLS_1_3_REGEX = concat!( + r"^((", + OPENSSL_CIPHERSUITE_RE!(), + ")(:(", + OPENSSL_CIPHERSUITE_RE!(), + "))*)$" + ); + /// Regex for safe identifiers. 
/// /// This @@ -159,6 +189,9 @@ pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&B pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX); pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX); +pub const OPENSSL_CIPHERS_TLS_1_2_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_TLS_1_2_REGEX); +pub const OPENSSL_CIPHERS_TLS_1_3_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_TLS_1_3_REGEX); + pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); @@ -188,6 +221,14 @@ pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in R .format(&HOSTNAME_FORMAT) .schema(); +pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = StringSchema::new("OpenSSL cipher string list used by the proxy for TLS <= 1.2") + .format(&OPENSSL_CIPHERS_TLS_1_2_FORMAT) + .schema(); + +pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLSv1.3") + .format(&OPENSSL_CIPHERS_TLS_1_3_FORMAT) + .schema(); + pub const DNS_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_REGEX); From 4e854d32f0e41234fa7430c4ad97458546b2eee7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 13 Jan 2022 10:16:15 +0100 Subject: [PATCH 089/299] ciphers: simplify API schema MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit these need to be checked (and are) via libssl anyway before persisting, and newer versions might contain new ciphers/variants/... (and things like @STRENGTH or @SECLEVEL=n were missing). 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/lib.rs | 43 +++++++--------------------------------- 1 file changed, 7 insertions(+), 36 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 4ef8eea1..754e7b22 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -99,20 +99,6 @@ mod local_macros { macro_rules! DNS_ALIAS_NAME { () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")")) } - macro_rules! OPENSSL_CIPHERSUITE_RE { - () => ( - r"TLS_AES_256_GCM_SHA384|TLS_CHACHA20_POLY1305_SHA256|TLS_AES_128_GCM_SHA256|TLS_AES_128_CCM_8_SHA256|TLS_AES_128_CCM_SHA256" - ) - } - macro_rules! OPENSSL_CIPHER_STRING_RE { - () => (concat!( - r"([!\-+]?(COMPLEMENTOFDEFAULT|ALL|COMPLEMENTOFALL|HIGH|MEDIUM|LOW|[ae]?NULL|[ka]?RSA|", - "kDH[rdE]?|kEDH|DHE?|EDH|ADH|kEECDH|kECDHE|ECDH|ECDHE|EECDH|AECDH|a?DSS|aDH|a?ECDSA|", - "SSLv3|AES(128|256)?|GCM|AESGCM|AESCCM|AESCCM8|ARIA(128|256)?|CAMELLIA(128|256)?|", - "CHACHA20|3?DES|RC[24]|IDEA|SEED|MD5|SHA(1|256|384)?|aGOST(01)?|kGOST|GOST94|GOST89MAC|", - "[ak]?PSK|kECDHEPSK|kDHEPSK|kRSAPSK|SUITEB(128|128ONLY|192)?|CBC3?|POLY1305))+" - )) - } } const_regex! { @@ -137,21 +123,8 @@ const_regex! { pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; - pub OPENSSL_CIPHERS_TLS_1_2_REGEX = concat!( - r"^((", - OPENSSL_CIPHER_STRING_RE!(), - ")([: ,](", - OPENSSL_CIPHER_STRING_RE!(), - "))*)$" - ); - - pub OPENSSL_CIPHERS_TLS_1_3_REGEX = concat!( - r"^((", - OPENSSL_CIPHERSUITE_RE!(), - ")(:(", - OPENSSL_CIPHERSUITE_RE!(), - "))*)$" - ); + // just a rough check - dummy acceptor is used before persisting + pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$"; /// Regex for safe identifiers. 
/// @@ -189,9 +162,7 @@ pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&B pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX); pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX); -pub const OPENSSL_CIPHERS_TLS_1_2_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_TLS_1_2_REGEX); -pub const OPENSSL_CIPHERS_TLS_1_3_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_TLS_1_3_REGEX); - +pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX); pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); @@ -221,12 +192,12 @@ pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in R .format(&HOSTNAME_FORMAT) .schema(); -pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = StringSchema::new("OpenSSL cipher string list used by the proxy for TLS <= 1.2") - .format(&OPENSSL_CIPHERS_TLS_1_2_FORMAT) +pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2") + .format(&OPENSSL_CIPHERS_TLS_FORMAT) .schema(); -pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLSv1.3") - .format(&OPENSSL_CIPHERS_TLS_1_3_FORMAT) +pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3") + .format(&OPENSSL_CIPHERS_TLS_FORMAT) .schema(); pub const DNS_NAME_FORMAT: ApiStringFormat = From c72fe7d77c560ad7699dd43d80dbfb98bded4d51 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 27 Jan 2022 15:13:19 +0100 Subject: [PATCH 090/299] verify: allow '0' days for reverification and let it mean that we will always reverify Signed-off-by: Dominik Csapak 
--- pbs-api-types/src/jobs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index c6664104..0e83da73 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -145,8 +145,8 @@ pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( .schema(); pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new( - "Days after that a verification becomes outdated") - .minimum(1) + "Days after that a verification becomes outdated. (0 means always)") + .minimum(0) .schema(); #[api( From 90476cf118cfce2b7334b16210fbf2ae9477e872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 8 Feb 2022 14:57:16 +0100 Subject: [PATCH 091/299] misc clippy fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit the trivial ones ;) Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/human_byte.rs | 24 +++++++++++------------- pbs-api-types/src/openid.rs | 2 +- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index e5969875..7793947e 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -53,20 +53,18 @@ impl SizeUnit { 11..=20 => SizeUnit::Kibi, _ => SizeUnit::Byte, } + } else if size >= 1_000_000_000_000_000.0 { + SizeUnit::PByte + } else if size >= 1_000_000_000_000.0 { + SizeUnit::TByte + } else if size >= 1_000_000_000.0 { + SizeUnit::GByte + } else if size >= 1_000_000.0 { + SizeUnit::MByte + } else if size >= 1_000.0 { + SizeUnit::KByte } else { - if size >= 1_000_000_000_000_000.0 { - SizeUnit::PByte - } else if size >= 1_000_000_000_000.0 { - SizeUnit::TByte - } else if size >= 1_000_000_000.0 { - SizeUnit::GByte - } else if size >= 1_000_000.0 { - SizeUnit::MByte - } else if size >= 1_000.0 { - SizeUnit::KByte - } else { - SizeUnit::Byte - } + SizeUnit::Byte } } } diff --git 
a/pbs-api-types/src/openid.rs b/pbs-api-types/src/openid.rs index 65967bd1..9dcd1f5b 100644 --- a/pbs-api-types/src/openid.rs +++ b/pbs-api-types/src/openid.rs @@ -22,7 +22,7 @@ pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = ArraySchema::new( pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat = ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA); -pub const OPENID_DEFAILT_SCOPE_LIST: &'static str = "email profile"; +pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile"; pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List") .format(&OPENID_SCOPE_LIST_FORMAT) .default(OPENID_DEFAILT_SCOPE_LIST) From 842a39af35d7c169df343a36ef9b6a11610b87aa Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 22 Feb 2022 15:57:49 +0100 Subject: [PATCH 092/299] datastore: add tuning option for chunk order currently, we sort chunks by inode when verifying or backing up to tape. we get the inode# by stat'ing each chunk, which may be more expensive than the gains of reading the chunks in order Since that is highly dependent on the underlying storage of the datastore, introduce a tuning option so that the admin can tune that behaviour for each datastore. 
The default stays the same (sorting by inode) Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 39 ++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 36279b3a..d0215403 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -167,6 +167,38 @@ pub struct PruneOptions { pub keep_yearly: Option, } +#[api] +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// The order to sort chunks by +pub enum ChunkOrder { + /// Iterate chunks in the index order + None, + /// Iterate chunks in inode order + Inode, +} + +#[api( + properties: { + "chunk-order": { + type: ChunkOrder, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +/// Datastore tuning options +pub struct DatastoreTuning { + /// Iterate chunks in this order + pub chunk_order: Option, +} + +pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new( + "Datastore tuning options") + .format(&ApiStringFormat::PropertyString(&DatastoreTuning::API_SCHEMA)) + .schema(); + #[api( properties: { name: { @@ -224,6 +256,10 @@ pub struct PruneOptions { optional: true, type: bool, }, + tuning: { + optional: true, + schema: DATASTORE_TUNING_STRING_SCHEMA, + }, } )] #[derive(Serialize,Deserialize,Updater)] @@ -261,6 +297,9 @@ pub struct DataStoreConfig { /// Send notification only for job errors #[serde(skip_serializing_if="Option::is_none")] pub notify: Option, + /// Datastore tuning options + #[serde(skip_serializing_if="Option::is_none")] + pub tuning: Option, } #[api( From 346e4222379ebb0f38b1d4134a066ffe438e045c Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 7 Mar 2022 07:41:03 +0100 Subject: [PATCH 093/299] cleanup: move BasicRealmInfo to pbs-api-types Signed-off-by: Dietmar Maurer --- pbs-api-types/src/lib.rs | 41 
++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 754e7b22..2f51afd9 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -442,3 +442,44 @@ pub enum RRDTimeFrame { /// Decade (10 years) Decade, } + +#[api] +#[derive(Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +/// type of the realm +pub enum RealmType { + /// The PAM realm + Pam, + /// The PBS realm + Pbs, + /// An OpenID Connect realm + OpenId, +} + +#[api( + properties: { + realm: { + schema: REALM_ID_SCHEMA, + }, + "type": { + type: RealmType, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + }, +)] +#[derive(Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +/// Basic Information about a realm +pub struct BasicRealmInfo { + pub realm: String, + #[serde(rename = "type")] + pub ty: RealmType, + /// True if it is the default realm + #[serde(skip_serializing_if = "Option::is_none")] + pub default: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, +} From 35da5ff9fe9cb0d4c72613a5000d827888aad9ae Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 7 Mar 2022 13:45:16 +0100 Subject: [PATCH 094/299] Username schema: set min_length to 1 Just to get a better error message (the regex already requires min_length 1) Signed-off-by: Dietmar Maurer --- pbs-api-types/src/userid.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 7ee64fb0..12843c3d 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -101,6 +101,7 @@ pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.s #[api( type: String, format: &PROXMOX_USER_NAME_FORMAT, + min_length: 1, )] /// The user name part of a user id. 
/// From 1a059f3ebeb5a58a23391850292d423c52d606f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Wed, 9 Mar 2022 09:55:36 +0100 Subject: [PATCH 095/299] regex: bump to 1.5.5 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit to ensure CVE fix for DoS on untrusted RE is picked up where it matters Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index e77d8bc4..e89c217b 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -10,7 +10,7 @@ anyhow = "1.0" hex = "0.4.3" lazy_static = "1.4" percent-encoding = "2.1" -regex = "1.2" +regex = "1.5.5" serde = { version = "1.0", features = ["derive"] } proxmox-lang = "1.0.0" From fdcb2694b44e5e6121d4da13ee0b748cff729e17 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 20 Mar 2022 09:36:03 +0100 Subject: [PATCH 096/299] datastore status: factor out api type DataStoreStatusListItem And use the rust type instead of json::Value. --- pbs-api-types/src/datastore.rs | 42 ++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index d0215403..36d86c98 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -633,6 +633,48 @@ pub struct DataStoreStatus { pub counts: Option, } +#[api( + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + history: { + type: Array, + optional: true, + items: { + type: Number, + description: "The usage of a time in the past. Either null or between 0.0 and 1.0.", + } + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all="kebab-case")] +/// Status of a Datastore +pub struct DataStoreStatusListItem { + pub store: String, + /// The Size of the underlying storage in bytes. + pub total: u64, + /// The used bytes of the underlying storage. 
+ pub used: u64, + /// The available bytes of the underlying storage. + pub avail: u64, + /// A list of usages of the past (last Month). + pub history: Option>>, + /// History start time (epoch) + pub history_start: Option, + /// History resolution (seconds) + pub history_delta: Option, + /// Estimation of the UNIX epoch when the storage will be full. + /// This is calculated via a simple Linear Regression (Least + /// Squares) of RRD data of the last Month. Missing if there are + /// not enough data points yet. If the estimate lies in the past, + /// the usage is decreasing. + pub estimated_full_date: Option, + /// An error description, for example, when the datastore could not be looked up + pub error: Option, +} + pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { optional: false, schema: &ArraySchema::new( From 24a10d107ab1a7852a64d89fff604708260f2b9d Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 22 Mar 2022 09:14:57 +0100 Subject: [PATCH 097/299] api: datastore_status: restore api/gui compatibility the latest changes to this api call changed/removed some things that were actually necessary for the gui. Readd those and document them this time. The change from u64 to i64 limits us to 8EiB of Datastore sizes (instead if 16EiB) but if we reach that, we must adapt most other parts to use 128bit sizes anyway Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 36d86c98..158ae140 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -653,25 +653,30 @@ pub struct DataStoreStatus { /// Status of a Datastore pub struct DataStoreStatusListItem { pub store: String, - /// The Size of the underlying storage in bytes. - pub total: u64, - /// The used bytes of the underlying storage. 
- pub used: u64, - /// The available bytes of the underlying storage. - pub avail: u64, + /// The Size of the underlying storage in bytes. (-1 on error) + pub total: i64, + /// The used bytes of the underlying storage. (-1 on error) + pub used: i64, + /// The available bytes of the underlying storage. (-1 on error) + pub avail: i64, /// A list of usages of the past (last Month). + #[serde(skip_serializing_if="Option::is_none")] pub history: Option>>, /// History start time (epoch) + #[serde(skip_serializing_if="Option::is_none")] pub history_start: Option, /// History resolution (seconds) + #[serde(skip_serializing_if="Option::is_none")] pub history_delta: Option, /// Estimation of the UNIX epoch when the storage will be full. /// This is calculated via a simple Linear Regression (Least /// Squares) of RRD data of the last Month. Missing if there are /// not enough data points yet. If the estimate lies in the past, - /// the usage is decreasing. + /// the usage is decreasing or not changing. + #[serde(skip_serializing_if="Option::is_none")] pub estimated_full_date: Option, /// An error description, for example, when the datastore could not be looked up + #[serde(skip_serializing_if="Option::is_none")] pub error: Option, } From f8c7bc4fb4e0dc243ac8896f54fc8bfff0f7e652 Mon Sep 17 00:00:00 2001 From: Stefan Sterz Date: Fri, 4 Mar 2022 12:31:57 +0100 Subject: [PATCH 098/299] fix #3067: api: add support for multi-line comments in node.cfg add support for multi-line comments to node.cfg and the api, similar to how pve handles multi-line comments Signed-off-by: Stefan Sterz Acked-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 2f51afd9..421566f7 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -137,6 +137,8 @@ const_regex! 
{ pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$"; + pub MULTI_LINE_COMMENT_REGEX = r"(?m)^([[:^cntrl:]]*)$"; + pub BACKUP_REPO_URL_REGEX = concat!( r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), @@ -273,6 +275,13 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl .format(&SINGLE_LINE_COMMENT_FORMAT) .schema(); +pub const MULTI_LINE_COMMENT_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&MULTI_LINE_COMMENT_REGEX); + +pub const MULTI_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (multiple lines).") + .format(&MULTI_LINE_COMMENT_FORMAT) + .schema(); + pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.") .format(&SUBSCRIPTION_KEY_FORMAT) .min_length(15) From 908908191e125c7c32dd4a3622e0ab7d3071d21d Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Sun, 10 Apr 2022 17:53:42 +0200 Subject: [PATCH 099/299] api types: rust fmt Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/crypto.rs | 14 +-- pbs-api-types/src/datastore.rs | 103 ++++++++--------- pbs-api-types/src/human_byte.rs | 31 +++++- pbs-api-types/src/jobs.rs | 159 ++++++++++++++------------- pbs-api-types/src/key_derivation.rs | 7 +- pbs-api-types/src/lib.rs | 111 +++++++++---------- pbs-api-types/src/network.rs | 73 ++++++------ pbs-api-types/src/openid.rs | 54 +++++---- pbs-api-types/src/remote.rs | 35 +++--- pbs-api-types/src/tape/device.rs | 12 +- pbs-api-types/src/tape/drive.rs | 65 +++++------ pbs-api-types/src/tape/media.rs | 48 ++++---- pbs-api-types/src/tape/media_pool.rs | 57 +++++----- pbs-api-types/src/tape/mod.rs | 31 +++--- pbs-api-types/src/traffic_control.rs | 46 ++++---- pbs-api-types/src/user.rs | 64 +++++------ pbs-api-types/src/userid.rs | 51 +++++++-- pbs-api-types/src/zfs.rs | 5 +- 18 files changed, 497 insertions(+), 469 deletions(-) diff --git a/pbs-api-types/src/crypto.rs b/pbs-api-types/src/crypto.rs index bd817034..cdc1ba64 100644 --- 
a/pbs-api-types/src/crypto.rs +++ b/pbs-api-types/src/crypto.rs @@ -62,18 +62,16 @@ fn as_fingerprint(bytes: &[u8]) -> String { .as_bytes() .chunks(2) .map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string - .collect::>().join(":") + .collect::>() + .join(":") } pub mod bytes_as_fingerprint { use std::mem::MaybeUninit; - use serde::{Deserialize, Serializer, Deserializer}; + use serde::{Deserialize, Deserializer, Serializer}; - pub fn serialize( - bytes: &[u8; 32], - serializer: S, - ) -> Result + pub fn serialize(bytes: &[u8; 32], serializer: S) -> Result where S: Serializer, { @@ -81,9 +79,7 @@ pub mod bytes_as_fingerprint { serializer.serialize_str(&s) } - pub fn deserialize<'de, D>( - deserializer: D, - ) -> Result<[u8; 32], D::Error> + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error> where D: Deserializer<'de>, { diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 158ae140..ea60e023 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -6,13 +6,12 @@ use proxmox_schema::{ }; use crate::{ - PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID, - Fingerprint, Userid, Authid, - GC_SCHEDULE_SCHEMA, DATASTORE_NOTIFY_STRING_SCHEMA, PRUNE_SCHEDULE_SCHEMA, - + Authid, CryptMode, Fingerprint, Userid, DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, + PROXMOX_SAFE_ID_FORMAT, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, + UPID, }; -const_regex!{ +const_regex! 
{ pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); @@ -81,17 +80,19 @@ pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.") .type_text("(=)?") .schema(); -pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new( - "Datastore mapping list.", &DATASTORE_MAP_SCHEMA) - .schema(); +pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = + ArraySchema::new("Datastore mapping list.", &DATASTORE_MAP_SCHEMA).schema(); pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new( "A list of Datastore mappings (or single datastore), comma separated. \ For example 'a=b,e' maps the source datastore 'a' to target 'b and \ all other sources to the default 'e'. If no default is given, only the \ - specified sources are mapped.") - .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA)) - .schema(); + specified sources are mapped.", +) +.format(&ApiStringFormat::PropertyString( + &DATASTORE_MAP_ARRAY_SCHEMA, +)) +.schema(); pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.") .minimum(1) @@ -153,17 +154,17 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = #[serde(rename_all = "kebab-case")] /// Common pruning options pub struct PruneOptions { - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_last: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_hourly: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_daily: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_weekly: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_monthly: Option, - #[serde(skip_serializing_if="Option::is_none")] + 
#[serde(skip_serializing_if = "Option::is_none")] pub keep_yearly: Option, } @@ -194,9 +195,10 @@ pub struct DatastoreTuning { pub chunk_order: Option, } -pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new( - "Datastore tuning options") - .format(&ApiStringFormat::PropertyString(&DatastoreTuning::API_SCHEMA)) +pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore tuning options") + .format(&ApiStringFormat::PropertyString( + &DatastoreTuning::API_SCHEMA, + )) .schema(); #[api( @@ -262,43 +264,43 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new( }, } )] -#[derive(Serialize,Deserialize,Updater)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize, Updater)] +#[serde(rename_all = "kebab-case")] /// Datastore configuration properties. pub struct DataStoreConfig { #[updater(skip)] pub name: String, #[updater(skip)] pub path: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub gc_schedule: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub prune_schedule: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_last: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_hourly: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_daily: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_weekly: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub keep_monthly: Option, - #[serde(skip_serializing_if="Option::is_none")] + 
#[serde(skip_serializing_if = "Option::is_none")] pub keep_yearly: Option, /// If enabled, all backups will be verified right after completion. - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub verify_new: Option, /// Send job email notification to this user - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub notify_user: Option, /// Send notification only for job errors - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub notify: Option, /// Datastore tuning options - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub tuning: Option, } @@ -375,7 +377,6 @@ pub struct SnapshotVerifyState { pub state: VerifyState, } - #[api( properties: { "backup-type": { @@ -616,7 +617,7 @@ impl Default for GarbageCollectionStatus { }, )] #[derive(Serialize, Deserialize)] -#[serde(rename_all="kebab-case")] +#[serde(rename_all = "kebab-case")] /// Overall Datastore status and useful information. pub struct DataStoreStatus { /// Total space (bytes). @@ -626,10 +627,10 @@ pub struct DataStoreStatus { /// Available space (bytes). pub avail: u64, /// Status of last GC - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub gc_status: Option, /// Group/Snapshot counts - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub counts: Option, } @@ -649,7 +650,7 @@ pub struct DataStoreStatus { }, )] #[derive(Serialize, Deserialize)] -#[serde(rename_all="kebab-case")] +#[serde(rename_all = "kebab-case")] /// Status of a Datastore pub struct DataStoreStatusListItem { pub store: String, @@ -660,23 +661,23 @@ pub struct DataStoreStatusListItem { /// The available bytes of the underlying storage. (-1 on error) pub avail: i64, /// A list of usages of the past (last Month). 
- #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub history: Option>>, /// History start time (epoch) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub history_start: Option, /// History resolution (seconds) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub history_delta: Option, /// Estimation of the UNIX epoch when the storage will be full. /// This is calculated via a simple Linear Regression (Least /// Squares) of RRD data of the last Month. Missing if there are /// not enough data points yet. If the estimate lies in the past, /// the usage is decreasing or not changing. - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub estimated_full_date: Option, /// An error description, for example, when the datastore could not be looked up - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub error: Option, } @@ -685,7 +686,8 @@ pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { schema: &ArraySchema::new( "Returns the list of snapshots.", &SnapshotListItem::API_SCHEMA, - ).schema(), + ) + .schema(), }; pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType { @@ -693,7 +695,8 @@ pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnTy schema: &ArraySchema::new( "Returns the list of archive files inside a backup snapshots.", &BackupContent::API_SCHEMA, - ).schema(), + ) + .schema(), }; pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType { @@ -701,7 +704,8 @@ pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType { schema: &ArraySchema::new( "Returns the list of backup groups.", &GroupListItem::API_SCHEMA, - ).schema(), + ) + .schema(), }; pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: 
ReturnType = ReturnType { @@ -709,5 +713,6 @@ pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType { schema: &ArraySchema::new( "Returns the list of snapshots and a flag indicating if there are kept or removed.", &PruneListItem::API_SCHEMA, - ).schema(), + ) + .schema(), }; diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index 7793947e..9e1a1893 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -101,7 +101,8 @@ fn strip_unit(v: &str) -> (&str, SizeUnit) { }; let mut unit = SizeUnit::Byte; - (v.strip_suffix(|c: char| match c { + #[rustfmt::skip] + let value = v.strip_suffix(|c: char| match c { 'k' | 'K' if !binary => { unit = SizeUnit::KByte; true } 'm' | 'M' if !binary => { unit = SizeUnit::MByte; true } 'g' | 'G' if !binary => { unit = SizeUnit::GByte; true } @@ -114,7 +115,9 @@ fn strip_unit(v: &str) -> (&str, SizeUnit) { 't' | 'T' if binary => { unit = SizeUnit::Tebi; true } 'p' | 'P' if binary => { unit = SizeUnit::Pebi; true } _ => false - }).unwrap_or(v).trim_end(), unit) + }).unwrap_or(v).trim_end(); + + (value, unit) } /// Byte size which can be displayed in a human friendly way @@ -154,13 +157,19 @@ impl HumanByte { /// Create a new instance with optimal binary unit computed pub fn new_binary(size: f64) -> Self { let unit = SizeUnit::auto_scale(size, true); - HumanByte { size: size / unit.factor(), unit } + HumanByte { + size: size / unit.factor(), + unit, + } } /// Create a new instance with optimal decimal unit computed pub fn new_decimal(size: f64) -> Self { let unit = SizeUnit::auto_scale(size, false); - HumanByte { size: size / unit.factor(), unit } + HumanByte { + size: size / unit.factor(), + unit, + } } /// Returns the size as u64 number of bytes @@ -228,7 +237,12 @@ fn test_human_byte_parser() -> Result<(), Error> { bail!("got unexpected size for '{}' ({} != {})", v, h.size, size); } if h.unit != unit { - bail!("got unexpected unit for '{}' ({:?} != {:?})", v, 
h.unit, unit); + bail!( + "got unexpected unit for '{}' ({:?} != {:?})", + v, + h.unit, + unit + ); } let new = h.to_string(); @@ -265,7 +279,12 @@ fn test_human_byte_parser() -> Result<(), Error> { assert_eq!(&format!("{:.7}", h), "1.2345678 B"); assert_eq!(&format!("{:.8}", h), "1.2345678 B"); - assert!(test("987654321", 987654321.0, SizeUnit::Byte, "987654321 B")); + assert!(test( + "987654321", + 987654321.0, + SizeUnit::Byte, + "987654321 B" + )); assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B")); assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B")); diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 0e83da73..654c0477 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -7,13 +7,12 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Userid, Authid, RateLimitConfig, - REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, - SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA, - BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA, + Authid, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, + DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, + SINGLE_LINE_COMMENT_SCHEMA, }; -const_regex!{ +const_regex! 
{ /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); @@ -27,34 +26,41 @@ pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") .max_length(32) .schema(); -pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new( - "Run sync job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) +pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.") + .format(&ApiStringFormat::VerifyFn( + proxmox_time::verify_calendar_event, + )) .type_text("") .schema(); -pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new( - "Run garbage collection job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) +pub const GC_SCHEDULE_SCHEMA: Schema = + StringSchema::new("Run garbage collection job at specified schedule.") + .format(&ApiStringFormat::VerifyFn( + proxmox_time::verify_calendar_event, + )) + .type_text("") + .schema(); + +pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.") + .format(&ApiStringFormat::VerifyFn( + proxmox_time::verify_calendar_event, + )) .type_text("") .schema(); -pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new( - "Run prune job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) - .type_text("") - .schema(); - -pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new( - "Run verify job at specified schedule.") - .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event)) - .type_text("") - .schema(); +pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = + StringSchema::new("Run verify job at specified schedule.") + .format(&ApiStringFormat::VerifyFn( + proxmox_time::verify_calendar_event, + )) + .type_text("") + .schema(); pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( - 
"Delete vanished backups. This remove the local copy if the remote backup was deleted.") - .default(false) - .schema(); + "Delete vanished backups. This remove the local copy if the remote backup was deleted.", +) +.default(false) +.schema(); #[api( properties: { @@ -80,17 +86,17 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( }, } )] -#[derive(Serialize,Deserialize,Default)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] /// Job Scheduling Status pub struct JobScheduleStatus { - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub next_run: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub last_run_state: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub last_run_upid: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub last_run_endtime: Option, } @@ -134,20 +140,23 @@ pub struct DatastoreNotify { pub sync: Option, } -pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new( - "Datastore notification setting") - .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA)) - .schema(); +pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = + StringSchema::new("Datastore notification setting") + .format(&ApiStringFormat::PropertyString( + &DatastoreNotify::API_SCHEMA, + )) + .schema(); pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( - "Do not verify backups that are already verified if their verification is not outdated.") - .default(true) - .schema(); + "Do not verify backups that are already verified if their verification is not outdated.", +) +.default(true) +.schema(); -pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new( - "Days after that a verification becomes 
outdated. (0 means always)") - .minimum(0) - .schema(); +pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = + IntegerSchema::new("Days after that a verification becomes outdated. (0 means always)") + .minimum(0) + .schema(); #[api( properties: { @@ -175,8 +184,8 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new( }, } )] -#[derive(Serialize,Deserialize,Updater)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize, Updater)] +#[serde(rename_all = "kebab-case")] /// Verification Job pub struct VerificationJobConfig { /// unique ID to address this job @@ -184,16 +193,16 @@ pub struct VerificationJobConfig { pub id: String, /// the datastore ID this verificaiton job affects pub store: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// if not set to false, check the age of the last snapshot verification to filter /// out recent ones, depending on 'outdated_after' configuration. pub ignore_verified: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false. 
pub outdated_after: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// when to schedule this job in calendar event notation pub schedule: Option, } @@ -208,8 +217,8 @@ pub struct VerificationJobConfig { }, }, )] -#[derive(Serialize,Deserialize)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] /// Status of Verification Job pub struct VerificationJobStatus { #[serde(flatten)] @@ -254,23 +263,23 @@ pub struct VerificationJobStatus { }, } )] -#[derive(Serialize,Deserialize,Clone,Updater)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize, Clone, Updater)] +#[serde(rename_all = "kebab-case")] /// Tape Backup Job Setup pub struct TapeBackupJobSetup { pub store: String, pub pool: String, pub drive: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub eject_media: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub export_media_set: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub latest_only: Option, /// Send job email notification to this user - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub notify_user: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub group_filter: Option>, } @@ -292,17 +301,17 @@ pub struct TapeBackupJobSetup { }, } )] -#[derive(Serialize,Deserialize,Clone,Updater)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize, Clone, Updater)] +#[serde(rename_all = "kebab-case")] /// Tape Backup Job pub struct TapeBackupJobConfig { #[updater(skip)] pub id: String, #[serde(flatten)] 
pub setup: TapeBackupJobSetup, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub schedule: Option, } @@ -316,8 +325,8 @@ pub struct TapeBackupJobConfig { }, }, )] -#[derive(Serialize,Deserialize)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] /// Status of Tape Backup Job pub struct TapeBackupJobStatus { #[serde(flatten)] @@ -325,7 +334,7 @@ pub struct TapeBackupJobStatus { #[serde(flatten)] pub status: JobScheduleStatus, /// Next tape used (best guess) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub next_media_label: Option, } @@ -378,7 +387,8 @@ pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new( .type_text("|group:GROUP|regex:RE>") .schema(); -pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema(); +pub const GROUP_FILTER_LIST_SCHEMA: Schema = + ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema(); #[api( properties: { @@ -419,24 +429,24 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group fil }, } )] -#[derive(Serialize,Deserialize,Clone,Updater)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize, Clone, Updater)] +#[serde(rename_all = "kebab-case")] /// Sync Job pub struct SyncJobConfig { #[updater(skip)] pub id: String, pub store: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, pub remote: String, pub remote_store: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub remove_vanished: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] 
pub comment: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub schedule: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub group_filter: Option>, #[serde(flatten)] pub limit: RateLimitConfig, @@ -452,9 +462,8 @@ pub struct SyncJobConfig { }, }, )] - -#[derive(Serialize,Deserialize)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] /// Status of Sync Job pub struct SyncJobStatus { #[serde(flatten)] diff --git a/pbs-api-types/src/key_derivation.rs b/pbs-api-types/src/key_derivation.rs index 26b86c30..8d6cbc89 100644 --- a/pbs-api-types/src/key_derivation.rs +++ b/pbs-api-types/src/key_derivation.rs @@ -39,7 +39,7 @@ impl Default for Kdf { /// Encryption Key Information pub struct KeyInfo { /// Path to key (if stored in a file) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub path: Option, pub kdf: Kdf, /// Key creation time @@ -47,10 +47,9 @@ pub struct KeyInfo { /// Key modification time pub modified: i64, /// Key fingerprint - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub fingerprint: Option, /// Password hint - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub hint: Option, } - diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 421566f7..03119f4a 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -6,7 +6,7 @@ pub mod common_regex; pub mod percent_encoding; use proxmox_schema::{ - api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, ReturnType, + api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema, }; use proxmox_time::parse_daily_duration; @@ -68,7 +68,7 @@ pub use user::*; pub use proxmox_schema::upid::*; mod 
crypto; -pub use crypto::{CryptMode, Fingerprint, bytes_as_fingerprint}; +pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint}; pub mod file_restore; @@ -87,7 +87,6 @@ pub use traffic_control::*; mod zfs; pub use zfs::*; - #[rustfmt::skip] #[macro_use] mod local_macros { @@ -160,14 +159,17 @@ pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX); pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX); pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); -pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX); -pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); -pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX); +pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX); +pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); +pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX); pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX); -pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX); +pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX); -pub const DNS_ALIAS_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); +pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); pub const DAILY_DURATION_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop)); @@ -175,18 +177,15 @@ pub const DAILY_DURATION_FORMAT: ApiStringFormat = pub const SEARCH_DOMAIN_SCHEMA: 
Schema = StringSchema::new("Search domain for host-name lookup.").schema(); -pub const FIRST_DNS_SERVER_SCHEMA: Schema = - StringSchema::new("First name server IP address.") +pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.") .format(&IP_FORMAT) .schema(); -pub const SECOND_DNS_SERVER_SCHEMA: Schema = - StringSchema::new("Second name server IP address.") +pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.") .format(&IP_FORMAT) .schema(); -pub const THIRD_DNS_SERVER_SCHEMA: Schema = - StringSchema::new("Third name server IP address.") +pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.") .format(&IP_FORMAT) .schema(); @@ -194,48 +193,47 @@ pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in R .format(&HOSTNAME_FORMAT) .schema(); -pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2") - .format(&OPENSSL_CIPHERS_TLS_FORMAT) - .schema(); +pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = + StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2") + .format(&OPENSSL_CIPHERS_TLS_FORMAT) + .schema(); -pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3") - .format(&OPENSSL_CIPHERS_TLS_FORMAT) - .schema(); +pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = + StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3") + .format(&OPENSSL_CIPHERS_TLS_FORMAT) + .schema(); -pub const DNS_NAME_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&DNS_NAME_REGEX); +pub const DNS_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_REGEX); -pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX); +pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat = 
ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX); pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.") .format(&DNS_NAME_OR_IP_FORMAT) .schema(); - pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") .format(&HOSTNAME_FORMAT) .schema(); pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new( - "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.") - .format(&SINGLE_LINE_COMMENT_FORMAT) - .min_length(2) - .max_length(64) - .schema(); + "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.", +) +.format(&SINGLE_LINE_COMMENT_FORMAT) +.min_length(2) +.max_length(64) +.schema(); -pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/).") - .format(&BLOCKDEVICE_NAME_FORMAT) - .min_length(3) - .max_length(64) - .schema(); +pub const BLOCKDEVICE_NAME_SCHEMA: Schema = + StringSchema::new("Block device name (/sys/block/).") + .format(&BLOCKDEVICE_NAME_FORMAT) + .min_length(3) + .max_length(64) + .schema(); -pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new( - "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA) - .schema(); +pub const DISK_ARRAY_SCHEMA: Schema = + ArraySchema::new("Disk name list.", &BLOCKDEVICE_NAME_SCHEMA).schema(); -pub const DISK_LIST_SCHEMA: Schema = StringSchema::new( - "A list of disk names, comma separated.") +pub const DISK_LIST_SCHEMA: Schema = StringSchema::new("A list of disk names, comma separated.") .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA)) .schema(); @@ -282,15 +280,14 @@ pub const MULTI_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (multip .format(&MULTI_LINE_COMMENT_FORMAT) .schema(); -pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.") - .format(&SUBSCRIPTION_KEY_FORMAT) - .min_length(15) - .max_length(16) - .schema(); +pub const SUBSCRIPTION_KEY_SCHEMA: Schema = + StringSchema::new("Proxmox Backup 
Server subscription key.") + .format(&SUBSCRIPTION_KEY_FORMAT) + .min_length(15) + .max_length(16) + .schema(); -pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.") - .max_length(256) - .schema(); +pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.").max_length(256).schema(); pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( "Prevent changes if current configuration file has different \ @@ -303,10 +300,8 @@ pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( /// API schema format definition for repository URLs pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX); - // Complex type definitions - #[api()] #[derive(Default, Serialize, Deserialize)] /// Storage space usage information. @@ -325,7 +320,6 @@ pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") .max_length(64) .schema(); - #[api()] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "PascalCase")] @@ -352,11 +346,10 @@ pub struct APTUpdateInfo { /// URL under which the package's changelog can be retrieved pub change_log_url: String, /// Custom extra field for additional package information - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub extra_info: Option, } - #[api()] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -368,7 +361,6 @@ pub enum NodePowerCommand { Shutdown, } - #[api()] #[derive(Eq, PartialEq, Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -407,19 +399,16 @@ pub struct TaskListItem { /// The authenticated entity who started the task pub user: String, /// The task end time (Epoch) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub endtime: Option, /// Task end status - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = 
"Option::is_none")] pub status: Option, } pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { optional: false, - schema: &ArraySchema::new( - "A list of tasks.", - &TaskListItem::API_SCHEMA, - ).schema(), + schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(), }; #[api()] diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index f2af3e6f..dda0db1b 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -3,49 +3,43 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ + CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT, PROXMOX_SAFE_ID_REGEX, - IP_V4_FORMAT, IP_V6_FORMAT, IP_FORMAT, - CIDR_V4_FORMAT, CIDR_V6_FORMAT, CIDR_FORMAT, }; pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); -pub const IP_V4_SCHEMA: Schema = - StringSchema::new("IPv4 address.") +pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.") .format(&IP_V4_FORMAT) .max_length(15) .schema(); -pub const IP_V6_SCHEMA: Schema = - StringSchema::new("IPv6 address.") +pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.") .format(&IP_V6_FORMAT) .max_length(39) .schema(); -pub const IP_SCHEMA: Schema = - StringSchema::new("IP (IPv4 or IPv6) address.") +pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.") .format(&IP_FORMAT) .max_length(39) .schema(); -pub const CIDR_V4_SCHEMA: Schema = - StringSchema::new("IPv4 address with netmask (CIDR notation).") +pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).") .format(&CIDR_V4_FORMAT) .max_length(18) .schema(); -pub const CIDR_V6_SCHEMA: Schema = - StringSchema::new("IPv6 address with netmask (CIDR notation).") +pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).") .format(&CIDR_V6_FORMAT) .max_length(43) .schema(); pub const 
CIDR_SCHEMA: Schema = StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).") - .format(&CIDR_FORMAT) - .max_length(43) - .schema(); + .format(&CIDR_FORMAT) + .max_length(43) + .schema(); #[api()] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] @@ -130,14 +124,15 @@ pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network int .max_length(15) // libc::IFNAMSIZ-1 .schema(); -pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new( - "Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA) - .schema(); +pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = + ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema(); -pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new( - "A list of network devices, comma separated.") - .format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA)) - .schema(); +pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = + StringSchema::new("A list of network devices, comma separated.") + .format(&ApiStringFormat::PropertyString( + &NETWORK_INTERFACE_ARRAY_SCHEMA, + )) + .schema(); #[api( properties: { @@ -232,48 +227,48 @@ pub struct Interface { /// Interface type #[serde(rename = "type")] pub interface_type: NetworkInterfaceType, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub method: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub method6: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// IPv4 address with netmask pub cidr: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// IPv4 gateway pub gateway: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// IPv6 address with netmask pub cidr6: Option, - 
#[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// IPv6 gateway pub gateway6: Option, - #[serde(skip_serializing_if="Vec::is_empty")] + #[serde(skip_serializing_if = "Vec::is_empty")] pub options: Vec, - #[serde(skip_serializing_if="Vec::is_empty")] + #[serde(skip_serializing_if = "Vec::is_empty")] pub options6: Vec, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comments: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comments6: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] /// Maximum Transmission Unit pub mtu: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub bridge_ports: Option>, /// Enable bridge vlan support. - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub bridge_vlan_aware: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub slaves: Option>, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub bond_mode: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] #[serde(rename = "bond-primary")] pub bond_primary: Option, pub bond_xmit_hash_policy: Option, @@ -281,7 +276,7 @@ pub struct Interface { impl Interface { pub fn new(name: String) -> Self { - Self { + Self { name, interface_type: NetworkInterfaceType::Unknown, autostart: false, diff --git a/pbs-api-types/src/openid.rs b/pbs-api-types/src/openid.rs index 9dcd1f5b..2c7646a3 100644 --- a/pbs-api-types/src/openid.rs +++ b/pbs-api-types/src/openid.rs @@ -1,23 +1,19 @@ use serde::{Deserialize, Serialize}; -use proxmox_schema::{ - api, ApiStringFormat, ArraySchema, Schema, 
StringSchema, Updater, -}; +use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater}; use super::{ - PROXMOX_SAFE_ID_REGEX, PROXMOX_SAFE_ID_FORMAT, REALM_ID_SCHEMA, - SINGLE_LINE_COMMENT_SCHEMA, + PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; -pub const OPENID_SCOPE_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); +pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.") .format(&OPENID_SCOPE_FORMAT) .schema(); -pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = ArraySchema::new( - "Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema(); +pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = + ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema(); pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat = ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA); @@ -28,15 +24,15 @@ pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope Lis .default(OPENID_DEFAILT_SCOPE_LIST) .schema(); -pub const OPENID_ACR_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); +pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); -pub const OPENID_ACR_SCHEMA: Schema = StringSchema::new("OpenID Authentication Context Class Reference.") - .format(&OPENID_SCOPE_FORMAT) - .schema(); +pub const OPENID_ACR_SCHEMA: Schema = + StringSchema::new("OpenID Authentication Context Class Reference.") + .format(&OPENID_SCOPE_FORMAT) + .schema(); -pub const OPENID_ACR_ARRAY_SCHEMA: Schema = ArraySchema::new( - "Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema(); +pub const OPENID_ACR_ARRAY_SCHEMA: Schema = + ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema(); pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat = 
ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA); @@ -50,10 +46,12 @@ pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new( is up to the identity provider to guarantee the uniqueness. The \ OpenID specification only guarantees that Subject ('sub') is \ unique. Also make sure that the user is not allowed to change that \ - attribute by himself!") - .max_length(64) - .min_length(1) - .format(&PROXMOX_SAFE_ID_FORMAT) .schema(); + attribute by himself!", +) +.max_length(64) +.min_length(1) +.format(&PROXMOX_SAFE_ID_FORMAT) +.schema(); #[api( properties: { @@ -92,7 +90,7 @@ pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new( }, )] #[derive(Serialize, Deserialize, Updater)] -#[serde(rename_all="kebab-case")] +#[serde(rename_all = "kebab-case")] /// OpenID configuration properties. pub struct OpenIdRealmConfig { #[updater(skip)] @@ -101,21 +99,21 @@ pub struct OpenIdRealmConfig { pub issuer_url: String, /// OpenID Client ID pub client_id: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub scopes: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub acr_values: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub prompt: Option, /// OpenID Client Key - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub client_key: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, /// Automatically create users if they do not exist. 
- #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub autocreate: Option, #[updater(skip)] - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub username_claim: Option, } diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs index b7cee5d3..1ebc9d4c 100644 --- a/pbs-api-types/src/remote.rs +++ b/pbs-api-types/src/remote.rs @@ -3,17 +3,19 @@ use serde::{Deserialize, Serialize}; use super::*; use proxmox_schema::*; -pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.") - .format(&PASSWORD_FORMAT) - .min_length(1) - .max_length(1024) - .schema(); +pub const REMOTE_PASSWORD_SCHEMA: Schema = + StringSchema::new("Password or auth token for remote host.") + .format(&PASSWORD_FORMAT) + .min_length(1) + .max_length(1024) + .schema(); -pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host (stored as base64 string).") - .format(&PASSWORD_FORMAT) - .min_length(1) - .max_length(1024) - .schema(); +pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = + StringSchema::new("Password or auth token for remote host (stored as base64 string).") + .format(&PASSWORD_FORMAT) + .min_length(1) + .max_length(1024) + .schema(); pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.") .format(&PROXMOX_SAFE_ID_FORMAT) @@ -21,7 +23,6 @@ pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.") .max_length(32) .schema(); - #[api( properties: { comment: { @@ -45,17 +46,17 @@ pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.") }, }, )] -#[derive(Serialize,Deserialize,Updater)] +#[derive(Serialize, Deserialize, Updater)] #[serde(rename_all = "kebab-case")] /// Remote configuration properties. 
pub struct RemoteConfig { - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, pub host: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub port: Option, pub auth_id: Authid, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub fingerprint: Option, } @@ -72,13 +73,13 @@ pub struct RemoteConfig { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Remote properties. pub struct Remote { pub name: String, // Note: The stored password is base64 encoded - #[serde(skip_serializing_if="String::is_empty")] + #[serde(skip_serializing_if = "String::is_empty")] #[serde(with = "proxmox_serde::string_as_base64")] pub password: String, #[serde(flatten)] diff --git a/pbs-api-types/src/tape/device.rs b/pbs-api-types/src/tape/device.rs index 54fad8b0..ff335cdf 100644 --- a/pbs-api-types/src/tape/device.rs +++ b/pbs-api-types/src/tape/device.rs @@ -3,23 +3,23 @@ use ::serde::{Deserialize, Serialize}; use proxmox_schema::api; #[api()] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Optional Device Identification Attributes pub struct OptionalDeviceIdentification { /// Vendor (autodetected) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub vendor: Option, /// Model (autodetected) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub model: Option, /// Serial number (autodetected) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub serial: Option, } #[api()] -#[derive(Debug,Serialize,Deserialize)] +#[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Kind of device pub enum DeviceKind { @@ 
-36,7 +36,7 @@ pub enum DeviceKind { }, }, )] -#[derive(Debug,Serialize,Deserialize)] +#[derive(Debug, Serialize, Deserialize)] /// Tape device information pub struct TapeDeviceInfo { pub kind: DeviceKind, diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index e177d39f..c8cb077c 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -4,13 +4,9 @@ use std::convert::TryFrom; use anyhow::{bail, Error}; use serde::{Deserialize, Serialize}; -use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater}; +use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater}; -use crate::{ - PROXMOX_SAFE_ID_FORMAT, - CHANGER_NAME_SCHEMA, - OptionalDeviceIdentification, -}; +use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT}; pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.") .format(&PROXMOX_SAFE_ID_FORMAT) @@ -18,16 +14,15 @@ pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.") .max_length(32) .schema(); -pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new( - "The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')") - .schema(); +pub const LTO_DRIVE_PATH_SCHEMA: Schema = + StringSchema::new("The path to a LTO SCSI-generic tape device (i.e. 
'/dev/sg0')").schema(); -pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new( - "Associated changer drive number (requires option changer)") - .minimum(0) - .maximum(255) - .default(0) - .schema(); +pub const CHANGER_DRIVENUM_SCHEMA: Schema = + IntegerSchema::new("Associated changer drive number (requires option changer)") + .minimum(0) + .maximum(255) + .default(0) + .schema(); #[api( properties: { @@ -36,7 +31,7 @@ pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new( } } )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] /// Simulate tape drives (only for test and debug) #[serde(rename_all = "kebab-case")] pub struct VirtualTapeDrive { @@ -44,7 +39,7 @@ pub struct VirtualTapeDrive { /// Path to directory pub path: String, /// Virtual tape size - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub max_size: Option, } @@ -66,16 +61,16 @@ pub struct VirtualTapeDrive { }, } )] -#[derive(Serialize,Deserialize,Updater)] +#[derive(Serialize, Deserialize, Updater)] #[serde(rename_all = "kebab-case")] /// Lto SCSI tape driver pub struct LtoTapeDrive { #[updater(skip)] pub name: String, pub path: String, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub changer: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub changer_drivenum: Option, } @@ -89,7 +84,7 @@ pub struct LtoTapeDrive { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Drive list entry pub struct DriveListEntry { @@ -98,12 +93,12 @@ pub struct DriveListEntry { #[serde(flatten)] pub info: OptionalDeviceIdentification, /// the state of the drive if locked - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub state: Option, } #[api()] -#[derive(Serialize,Deserialize)] 
+#[derive(Serialize, Deserialize)] /// Medium auxiliary memory attributes (MAM) pub struct MamAttribute { /// Attribute id @@ -115,7 +110,7 @@ pub struct MamAttribute { } #[api()] -#[derive(Serialize,Deserialize,Copy,Clone,Debug)] +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] pub enum TapeDensity { /// Unknown (no media loaded) Unknown, @@ -168,7 +163,7 @@ impl TryFrom for TapeDensity { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Drive/Media status for Lto SCSI drives. /// @@ -190,35 +185,35 @@ pub struct LtoDriveAndMediaStatus { /// Tape density pub density: TapeDensity, /// Media is write protected - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub write_protect: Option, /// Tape Alert Flags - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub alert_flags: Option, /// Current file number - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub file_number: Option, /// Current block number - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub block_number: Option, /// Medium Manufacture Date (epoch) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub manufactured: Option, /// Total Bytes Read in Medium Life - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub bytes_read: Option, /// Total Bytes Written in Medium Life - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub bytes_written: Option, /// Number of mounts for the current volume (i.e., Thread Count) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub volume_mounts: Option, /// Count of the total number of times 
the medium has passed over /// the head. - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub medium_passes: Option, /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub medium_wearout: Option, } diff --git a/pbs-api-types/src/tape/media.rs b/pbs-api-types/src/tape/media.rs index 61d7be04..c2c25da0 100644 --- a/pbs-api-types/src/tape/media.rs +++ b/pbs-api-types/src/tape/media.rs @@ -3,19 +3,15 @@ use ::serde::{Deserialize, Serialize}; use proxmox_schema::*; use proxmox_uuid::Uuid; -use crate::{ - UUID_FORMAT, - MediaStatus, - MediaLocation, -}; +use crate::{MediaLocation, MediaStatus, UUID_FORMAT}; -pub const MEDIA_SET_UUID_SCHEMA: Schema = - StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).") - .format(&UUID_FORMAT) - .schema(); +pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new( + "MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).", +) +.format(&UUID_FORMAT) +.schema(); -pub const MEDIA_UUID_SCHEMA: Schema = - StringSchema::new("Media Uuid.") +pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.") .format(&UUID_FORMAT) .schema(); @@ -26,7 +22,7 @@ pub const MEDIA_UUID_SCHEMA: Schema = }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Media Set list entry pub struct MediaSetListEntry { @@ -56,7 +52,7 @@ pub struct MediaSetListEntry { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Media list entry pub struct MediaListEntry { @@ -72,18 +68,18 @@ pub struct MediaListEntry { /// Catalog status OK pub catalog: bool, /// Media set name - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub 
media_set_name: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub media_set_uuid: Option, /// Media set seq_nr - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub seq_nr: Option, /// MediaSet creation time stamp - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub media_set_ctime: Option, /// Media Pool - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub pool: Option, } @@ -98,7 +94,7 @@ pub struct MediaListEntry { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Media label info pub struct MediaIdFlat { @@ -110,18 +106,18 @@ pub struct MediaIdFlat { pub ctime: i64, // All MediaSet properties are optional here /// MediaSet Pool - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub pool: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub media_set_uuid: Option, /// MediaSet media sequence number - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub seq_nr: Option, /// MediaSet Creation time stamp - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub media_set_ctime: Option, /// Encryption key fingerprint - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub encryption_key_fingerprint: Option, } @@ -133,7 +129,7 @@ pub struct MediaIdFlat { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Label with optional Uuid pub struct LabelUuidMap { @@ -153,7 +149,7 @@ pub struct LabelUuidMap { }, }, )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, 
Deserialize)] #[serde(rename_all = "kebab-case")] /// Media content list entry pub struct MediaContentEntry { diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs index c0cba2bd..c3eacec7 100644 --- a/pbs-api-types/src/tape/media_pool.rs +++ b/pbs-api-types/src/tape/media_pool.rs @@ -9,14 +9,12 @@ use std::str::FromStr; use anyhow::Error; use serde::{Deserialize, Serialize}; -use proxmox_schema::{api, Schema, StringSchema, ApiStringFormat, Updater}; +use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater}; use proxmox_time::{CalendarEvent, TimeSpan}; use crate::{ - PROXMOX_SAFE_ID_FORMAT, - SINGLE_LINE_COMMENT_FORMAT, - SINGLE_LINE_COMMENT_SCHEMA, + PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA, }; @@ -27,19 +25,22 @@ pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.") .schema(); pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new( - "Media set naming template (may contain strftime() time format specifications).") - .format(&SINGLE_LINE_COMMENT_FORMAT) - .min_length(2) - .max_length(64) - .schema(); + "Media set naming template (may contain strftime() time format specifications).", +) +.format(&SINGLE_LINE_COMMENT_FORMAT) +.min_length(2) +.max_length(64) +.schema(); -pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = - ApiStringFormat::VerifyFn(|s| { MediaSetPolicy::from_str(s)?; Ok(()) }); +pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| { + MediaSetPolicy::from_str(s)?; + Ok(()) +}); -pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema = StringSchema::new( - "Media set allocation policy ('continue', 'always', or a calendar event).") - .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT) - .schema(); +pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema = + StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar 
event).") + .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT) + .schema(); /// Media set allocation policy pub enum MediaSetPolicy { @@ -68,13 +69,15 @@ impl std::str::FromStr for MediaSetPolicy { } } -pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = - ApiStringFormat::VerifyFn(|s| { RetentionPolicy::from_str(s)?; Ok(()) }); +pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| { + RetentionPolicy::from_str(s)?; + Ok(()) +}); -pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema = StringSchema::new( - "Media retention policy ('overwrite', 'keep', or time span).") - .format(&MEDIA_RETENTION_POLICY_FORMAT) - .schema(); +pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema = + StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).") + .format(&MEDIA_RETENTION_POLICY_FORMAT) + .schema(); /// Media retention Policy pub enum RetentionPolicy { @@ -130,29 +133,29 @@ impl std::str::FromStr for RetentionPolicy { }, }, )] -#[derive(Serialize,Deserialize,Updater)] +#[derive(Serialize, Deserialize, Updater)] /// Media pool configuration pub struct MediaPoolConfig { /// The pool name #[updater(skip)] pub name: String, /// Media Set allocation policy - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub allocation: Option, /// Media retention policy - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub retention: Option, /// Media set naming template (default "%c") /// /// The template is UTF8 text, and can include strftime time /// format specifications. - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub template: Option, /// Encryption key fingerprint /// /// If set, encrypt all data using the specified key. 
- #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub encrypt: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, } diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs index 58777a52..c90ebd0e 100644 --- a/pbs-api-types/src/tape/mod.rs +++ b/pbs-api-types/src/tape/mod.rs @@ -24,31 +24,28 @@ pub use media::*; use serde::{Deserialize, Serialize}; -use proxmox_schema::{api, const_regex, Schema, StringSchema, ApiStringFormat}; +use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema}; use proxmox_uuid::Uuid; -use crate::{ - FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, -}; +use crate::{BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, FINGERPRINT_SHA256_FORMAT}; -const_regex!{ +const_regex! { pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$"); } pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX); -pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new( - "Tape encryption key fingerprint (sha256)." 
-) - .format(&FINGERPRINT_SHA256_FORMAT) - .schema(); +pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = + StringSchema::new("Tape encryption key fingerprint (sha256).") + .format(&FINGERPRINT_SHA256_FORMAT) + .schema(); -pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new( - "A snapshot in the format: 'store:type/id/time") - .format(&TAPE_RESTORE_SNAPSHOT_FORMAT) - .type_text("store:type/id/time") - .schema(); +pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = + StringSchema::new("A snapshot in the format: 'store:type/id/time") + .format(&TAPE_RESTORE_SNAPSHOT_FORMAT) + .type_text("store:type/id/time") + .schema(); #[api( properties: { @@ -78,8 +75,8 @@ pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new( }, }, )] -#[derive(Serialize,Deserialize)] -#[serde(rename_all="kebab-case")] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] /// Content list filter parameters pub struct MediaContentListFilter { pub pool: Option, diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index a1fcb7b5..d29f18b4 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -1,16 +1,16 @@ use serde::{Deserialize, Serialize}; -use proxmox_schema::{api, Schema, IntegerSchema, StringSchema, Updater}; +use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater}; use crate::{ - HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT, - PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, + HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, + SINGLE_LINE_COMMENT_SCHEMA, }; -pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = StringSchema::new( - "Timeframe to specify when the rule is actice.") - .format(&DAILY_DURATION_FORMAT) - .schema(); +pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = + StringSchema::new("Timeframe to specify when the rule is actice.") + .format(&DAILY_DURATION_FORMAT) + .schema(); pub const 
TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.") .format(&PROXMOX_SAFE_ID_FORMAT) @@ -18,15 +18,15 @@ pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.") .max_length(32) .schema(); -pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = IntegerSchema::new( - "Rate limit (for Token bucket filter) in bytes/second.") - .minimum(100_000) - .schema(); +pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = + IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.") + .minimum(100_000) + .schema(); -pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new( - "Size of the token bucket (for Token bucket filter) in bytes.") - .minimum(1000) - .schema(); +pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = + IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.") + .minimum(1000) + .schema(); #[api( properties: { @@ -48,17 +48,17 @@ pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = IntegerSchema::new( }, }, )] -#[derive(Serialize,Deserialize,Default,Clone,Updater)] +#[derive(Serialize, Deserialize, Default, Clone, Updater)] #[serde(rename_all = "kebab-case")] /// Rate Limit Configuration pub struct RateLimitConfig { - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub rate_in: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub burst_in: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub rate_out: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub burst_out: Option, } @@ -100,13 +100,13 @@ impl RateLimitConfig { }, }, )] -#[derive(Serialize,Deserialize, Updater)] +#[derive(Serialize, Deserialize, Updater)] #[serde(rename_all = "kebab-case")] /// Traffic control rule pub struct TrafficControlRule { #[updater(skip)] pub name: String, - 
#[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, /// Rule applies to Source IPs within this networks pub network: Vec, @@ -117,6 +117,6 @@ pub struct TrafficControlRule { // #[serde(skip_serializing_if="Option::is_none")] // pub shared: Option, /// Enable the rule at specific times - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub timeframe: Option>, } diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs index 94ed07c0..56d82537 100644 --- a/pbs-api-types/src/user.rs +++ b/pbs-api-types/src/user.rs @@ -1,22 +1,22 @@ use serde::{Deserialize, Serialize}; -use proxmox_schema::{ - api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater, -}; +use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater}; -use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA}; use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA}; +use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA}; pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new( - "Enable the account (default). You can set this to '0' to disable the account.") - .default(true) - .schema(); + "Enable the account (default). You can set this to '0' to disable the account.", +) +.default(true) +.schema(); pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new( - "Account expiration date (seconds since epoch). '0' means no expiration date.") - .default(0) - .minimum(0) - .schema(); + "Account expiration date (seconds since epoch). 
'0' means no expiration date.", +) +.default(0) +.minimum(0) +.schema(); pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.") .format(&SINGLE_LINE_COMMENT_FORMAT) @@ -75,23 +75,23 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.") }, } )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] /// User properties with added list of ApiTokens pub struct UserWithTokens { pub userid: Userid, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub enable: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub expire: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub firstname: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub lastname: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub email: Option, - #[serde(skip_serializing_if="Vec::is_empty", default)] + #[serde(skip_serializing_if = "Vec::is_empty", default)] pub tokens: Vec, } @@ -114,15 +114,15 @@ pub struct UserWithTokens { }, } )] -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] /// ApiToken properties. 
pub struct ApiToken { pub tokenid: Authid, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub enable: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub expire: Option, } @@ -132,7 +132,7 @@ impl ApiToken { return false; } if let Some(expire) = self.expire { - let now = proxmox_time::epoch_i64(); + let now = proxmox_time::epoch_i64(); if expire > 0 && expire <= now { return false; } @@ -172,22 +172,22 @@ impl ApiToken { }, } )] -#[derive(Serialize,Deserialize,Updater)] +#[derive(Serialize, Deserialize, Updater)] /// User properties. pub struct User { #[updater(skip)] pub userid: Userid, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub enable: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub expire: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub firstname: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub lastname: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub email: Option, } @@ -197,7 +197,7 @@ impl User { return false; } if let Some(expire) = self.expire { - let now = proxmox_time::epoch_i64(); + let now = proxmox_time::epoch_i64(); if expire > 0 && expire <= now { return false; } diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 12843c3d..90dcd02e 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -39,15 +39,35 @@ use proxmox_schema::{ 
// slash is not allowed because it is used as pve API delimiter // also see "man useradd" #[macro_export] -macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") } +macro_rules! USER_NAME_REGEX_STR { + () => { + r"(?:[^\s:/[:cntrl:]]+)" + }; +} #[macro_export] -macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) } +macro_rules! GROUP_NAME_REGEX_STR { + () => { + USER_NAME_REGEX_STR!() + }; +} #[macro_export] -macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) } +macro_rules! TOKEN_NAME_REGEX_STR { + () => { + PROXMOX_SAFE_ID_REGEX_STR!() + }; +} #[macro_export] -macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) } +macro_rules! USER_ID_REGEX_STR { + () => { + concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!()) + }; +} #[macro_export] -macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) } +macro_rules! APITOKEN_ID_REGEX_STR { + () => { + concat!(USER_ID_REGEX_STR!(), r"!", TOKEN_NAME_REGEX_STR!()) + }; +} const_regex! 
{ pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$"); @@ -238,7 +258,8 @@ impl TryFrom for Realm { type Error = Error; fn try_from(s: String) -> Result { - PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&s) + PROXMOX_AUTH_REALM_STRING_SCHEMA + .check_constraints(&s) .map_err(|_| format_err!("invalid realm"))?; Ok(Self(s)) @@ -249,7 +270,8 @@ impl<'a> TryFrom<&'a str> for &'a RealmRef { type Error = Error; fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> { - PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(s) + PROXMOX_AUTH_REALM_STRING_SCHEMA + .check_constraints(s) .map_err(|_| format_err!("invalid realm"))?; Ok(RealmRef::new(s)) @@ -482,7 +504,8 @@ impl std::str::FromStr for Userid { bail!("invalid user name in user id"); } - PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm) + PROXMOX_AUTH_REALM_STRING_SCHEMA + .check_constraints(realm) .map_err(|_| format_err!("invalid realm in user id"))?; Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm)))) @@ -503,7 +526,8 @@ impl TryFrom for Userid { bail!("invalid user name in user id"); } - PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..]) + PROXMOX_AUTH_REALM_STRING_SCHEMA + .check_constraints(&data[(name_len + 1)..]) .map_err(|_| format_err!("invalid realm in user id"))?; Ok(Self { data, name_len }) @@ -532,7 +556,7 @@ impl PartialEq for Userid { #[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType)] pub struct Authid { user: Userid, - tokenname: Option + tokenname: Option, } impl ApiType for Authid { @@ -652,7 +676,7 @@ impl TryFrom for Authid { data.truncate(realm_end); - let user:Userid = data.parse()?; + let user: Userid = data.parse()?; Ok(Self { user, tokenname }) } @@ -679,7 +703,10 @@ fn test_token_id() { let token_userid = auth_id.user(); assert_eq!(&userid, token_userid); assert!(auth_id.is_token()); - assert_eq!(auth_id.tokenname().expect("Token has tokenname").as_str(), TokennameRef::new("bar").as_str()); + assert_eq!( + 
auth_id.tokenname().expect("Token has tokenname").as_str(), + TokennameRef::new("bar").as_str() + ); assert_eq!(auth_id.to_string(), "test@pam!bar".to_string()); } diff --git a/pbs-api-types/src/zfs.rs b/pbs-api-types/src/zfs.rs index 5fe49561..b62af6cb 100644 --- a/pbs-api-types/src/zfs.rs +++ b/pbs-api-types/src/zfs.rs @@ -6,8 +6,7 @@ const_regex! { pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$"; } -pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new( - "Pool sector size exponent.") +pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.") .minimum(9) .maximum(16) .default(12) @@ -59,7 +58,7 @@ pub enum ZfsRaidLevel { #[api()] #[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all="kebab-case")] +#[serde(rename_all = "kebab-case")] /// zpool list item pub struct ZpoolListItem { /// zpool name From 35786fe37ebe750d67de695c012ccdc20cfecfda Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Tue, 12 Apr 2022 05:25:56 +0000 Subject: [PATCH 100/299] api-types: add maintenance type + bump proxmox-schema dep to 1.2.1 (for quoted property string) Signed-off-by: Hannes Laimer --- pbs-api-types/Cargo.toml | 2 +- pbs-api-types/src/datastore.rs | 23 +++++++++++-- pbs-api-types/src/lib.rs | 3 ++ pbs-api-types/src/maintenance.rs | 59 ++++++++++++++++++++++++++++++++ 4 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 pbs-api-types/src/maintenance.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index e89c217b..485aee8b 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,7 +14,7 @@ regex = "1.5.5" serde = { version = "1.0", features = ["derive"] } proxmox-lang = "1.0.0" -proxmox-schema = { version = "1.1", features = [ "api-macro" ] } +proxmox-schema = { version = "1.2.1", features = [ "api-macro" ] } proxmox-serde = "0.1" proxmox-time = "1.1.1" proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } diff --git a/pbs-api-types/src/datastore.rs 
b/pbs-api-types/src/datastore.rs index ea60e023..01e2319a 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -6,9 +6,9 @@ use proxmox_schema::{ }; use crate::{ - Authid, CryptMode, Fingerprint, Userid, DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, - PROXMOX_SAFE_ID_FORMAT, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, - UPID, + Authid, CryptMode, Fingerprint, MaintenanceMode, Userid, DATASTORE_NOTIFY_STRING_SCHEMA, + GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, + SINGLE_LINE_COMMENT_SCHEMA, UPID, }; const_regex! { @@ -262,6 +262,11 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore optional: true, schema: DATASTORE_TUNING_STRING_SCHEMA, }, + "maintenance-mode": { + optional: true, + format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), + type: String, + }, } )] #[derive(Serialize, Deserialize, Updater)] @@ -302,6 +307,18 @@ pub struct DataStoreConfig { /// Datastore tuning options #[serde(skip_serializing_if = "Option::is_none")] pub tuning: Option, + /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " + #[serde(skip_serializing_if = "Option::is_none")] + pub maintenance_mode: Option, +} + +impl DataStoreConfig { + pub fn get_maintenance_mode(&self) -> Option { + self.maintenance_mode + .as_ref() + .and_then(|str| MaintenanceMode::API_SCHEMA.parse_property_string(str).ok()) + .and_then(|value| MaintenanceMode::deserialize(value).ok()) + } } #[api( diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 03119f4a..d121d26e 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -49,6 +49,9 @@ pub use jobs::*; mod key_derivation; pub use key_derivation::{Kdf, KeyInfo}; +mod maintenance; +pub use maintenance::*; + mod network; pub use network::*; diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs new file mode 100644 
index 00000000..9b74e9b9 --- /dev/null +++ b/pbs-api-types/src/maintenance.rs @@ -0,0 +1,59 @@ +use serde::{Deserialize, Serialize}; + +use proxmox_schema::{api, ApiStringFormat, const_regex, Schema, StringSchema}; + +const_regex!{ + pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$"; +} + +pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX); + + +pub const MAINTENANCE_MESSAGE_SCHEMA: Schema = + StringSchema::new("Message describing the reason for the maintenance.") + .format(&MAINTENANCE_MESSAGE_FORMAT) + .max_length(64) + .schema(); + +#[derive(Clone, Copy, Debug)] +/// Operation requirements, used when checking for maintenance mode. +pub enum Operation { + Read, + Write, +} + +#[api] +#[derive(Deserialize, Serialize)] +#[serde(rename_all="kebab-case")] +/// Maintenance type. +pub enum MaintenanceType { + /// Only read operations are allowed on the datastore. + ReadOnly, + /// Neither read nor write operations are allowed on the datastore. + Offline, +} + +#[api( + properties: { + type: { + type: MaintenanceType, + }, + message: { + optional: true, + schema: MAINTENANCE_MESSAGE_SCHEMA, + } + }, + default_key: "type", +)] +#[derive(Deserialize, Serialize)] +/// Maintenance mode +pub struct MaintenanceMode { + /// Type of maintenance ("read-only" or "offline"). + #[serde(rename = "type")] + ty: MaintenanceType, + + /// Reason for maintenance. 
+ #[serde(skip_serializing_if = "Option::is_none")] + message: Option, +} From 50fa7bad491ca63f316dd1aa39a5df4b2c3d0075 Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Tue, 12 Apr 2022 05:25:57 +0000 Subject: [PATCH 101/299] datastore: add check for maintenance in lookup Signed-off-by: Hannes Laimer --- pbs-api-types/src/maintenance.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 9b74e9b9..f8d4dad3 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; +use anyhow::{bail, Error}; use serde::{Deserialize, Serialize}; use proxmox_schema::{api, ApiStringFormat, const_regex, Schema, StringSchema}; @@ -24,7 +26,7 @@ pub enum Operation { } #[api] -#[derive(Deserialize, Serialize)] +#[derive(Deserialize, Serialize, PartialEq)] #[serde(rename_all="kebab-case")] /// Maintenance type. pub enum MaintenanceType { @@ -57,3 +59,20 @@ pub struct MaintenanceMode { #[serde(skip_serializing_if = "Option::is_none")] message: Option, } + +impl MaintenanceMode { + pub fn check(&self, operation: Option) -> Result<(), Error> { + let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or("")) + .decode_utf8() + .unwrap_or(Cow::Borrowed("")); + + if self.ty == MaintenanceType::Offline { + bail!("offline maintenance mode: {}", message); + } else if self.ty == MaintenanceType::ReadOnly { + if let Some(Operation::Write) = operation { + bail!("read-only maintenance mode: {}", message); + } + } + Ok(()) + } +} From b635dc3ee112f822ce46a45260cfc00fa1883b4d Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 14 Apr 2022 14:03:46 +0200 Subject: [PATCH 102/299] rust fmt for pbs src Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/maintenance.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pbs-api-types/src/maintenance.rs 
b/pbs-api-types/src/maintenance.rs index f8d4dad3..dd3de50a 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -1,17 +1,16 @@ -use std::borrow::Cow; use anyhow::{bail, Error}; use serde::{Deserialize, Serialize}; +use std::borrow::Cow; -use proxmox_schema::{api, ApiStringFormat, const_regex, Schema, StringSchema}; +use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema}; -const_regex!{ +const_regex! { pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$"; } pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX); - pub const MAINTENANCE_MESSAGE_SCHEMA: Schema = StringSchema::new("Message describing the reason for the maintenance.") .format(&MAINTENANCE_MESSAGE_FORMAT) @@ -27,7 +26,7 @@ pub enum Operation { #[api] #[derive(Deserialize, Serialize, PartialEq)] -#[serde(rename_all="kebab-case")] +#[serde(rename_all = "kebab-case")] /// Maintenance type. pub enum MaintenanceType { /// Only read operations are allowed on the datastore. From 32ea4b56a10edd26feb88cfc06237b6bc510dab9 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 14 Apr 2022 15:05:58 +0200 Subject: [PATCH 103/299] api-types: introduce BackupType enum and Group/Dir api types The type is a real enum. All are API types and implement Display and FromStr. The ordering is the same as it is in pbs-datastore. Also, they are now flattened into a few structs instead of being copied manually. 
Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 285 +++++++++++++++++++++++++++++---- pbs-api-types/src/tape/mod.rs | 6 +- 2 files changed, 253 insertions(+), 38 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 01e2319a..92579f61 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,3 +1,6 @@ +use std::fmt; + +use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use proxmox_schema::{ @@ -394,17 +397,244 @@ pub struct SnapshotVerifyState { pub state: VerifyState, } +#[api] +/// Backup types. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum BackupType { + /// Virtual machines. + Vm, + + /// Containers. + Ct, + + /// "Host" backups. + Host, +} + +impl BackupType { + pub const fn as_str(&self) -> &'static str { + match self { + BackupType::Vm => "vm", + BackupType::Ct => "ct", + BackupType::Host => "host", + } + } + + /// We used to have alphabetical ordering here when this was a string. + const fn order(self) -> u8 { + match self { + BackupType::Ct => 0, + BackupType::Host => 1, + BackupType::Vm => 2, + } + } +} + +impl fmt::Display for BackupType { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.as_str(), f) + } +} + +impl std::str::FromStr for BackupType { + type Err = Error; + + /// Parse a backup type. 
+ fn from_str(ty: &str) -> Result { + Ok(match ty { + "ct" => BackupType::Ct, + "host" => BackupType::Host, + "vm" => BackupType::Vm, + _ => bail!("invalid backup type {ty:?}"), + }) + } +} + +impl std::cmp::Ord for BackupType { + #[inline] + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.order().cmp(&other.order()) + } +} + +impl std::cmp::PartialOrd for BackupType { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + #[api( properties: { - "backup-type": { - schema: BACKUP_TYPE_SCHEMA, - }, - "backup-id": { - schema: BACKUP_ID_SCHEMA, - }, - "backup-time": { - schema: BACKUP_TIME_SCHEMA, - }, + "backup-type": { type: BackupType }, + "backup-id": { schema: BACKUP_ID_SCHEMA }, + }, +)] +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// A backup group (without a data store). +pub struct BackupGroup { + /// Backup type. + #[serde(rename = "backup-type")] + pub ty: BackupType, + + /// Backup id. 
+ #[serde(rename = "backup-id")] + pub id: String, +} + +impl BackupGroup { + pub fn new>(ty: BackupType, id: T) -> Self { + Self { ty, id: id.into() } + } +} + +impl From<(BackupType, String)> for BackupGroup { + fn from(data: (BackupType, String)) -> Self { + Self { + ty: data.0, + id: data.1, + } + } +} + +impl std::cmp::Ord for BackupGroup { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + let type_order = self.ty.cmp(&other.ty); + if type_order != std::cmp::Ordering::Equal { + return type_order; + } + // try to compare IDs numerically + let id_self = self.id.parse::(); + let id_other = other.id.parse::(); + match (id_self, id_other) { + (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other), + (Ok(_), Err(_)) => std::cmp::Ordering::Less, + (Err(_), Ok(_)) => std::cmp::Ordering::Greater, + _ => self.id.cmp(&other.id), + } + } +} + +impl std::cmp::PartialOrd for BackupGroup { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl fmt::Display for BackupGroup { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.ty, self.id) + } +} + +impl std::str::FromStr for BackupGroup { + type Err = Error; + + /// Parse a backup group. + /// + /// This parses strings like `vm/100". + fn from_str(path: &str) -> Result { + let cap = GROUP_PATH_REGEX + .captures(path) + .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?; + + Ok(Self { + ty: cap.get(1).unwrap().as_str().parse()?, + id: cap.get(2).unwrap().as_str().to_owned(), + }) + } +} + +#[api( + properties: { + "group": { type: BackupGroup }, + "backup-time": { schema: BACKUP_TIME_SCHEMA }, + }, +)] +/// Uniquely identify a Backup (relative to data store) +/// +/// We also call this a backup snaphost. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct BackupDir { + /// Backup group. + #[serde(flatten)] + pub group: BackupGroup, + + /// Backup timestamp unix epoch. 
+ #[serde(rename = "backup-time")] + pub time: i64, +} + +impl From<(BackupGroup, i64)> for BackupDir { + fn from(data: (BackupGroup, i64)) -> Self { + Self { + group: data.0, + time: data.1, + } + } +} + +impl From<(BackupType, String, i64)> for BackupDir { + fn from(data: (BackupType, String, i64)) -> Self { + Self { + group: (data.0, data.1).into(), + time: data.2, + } + } +} + +impl BackupDir { + pub fn with_rfc3339(ty: BackupType, id: T, backup_time_string: &str) -> Result + where + T: Into, + { + let time = proxmox_time::parse_rfc3339(&backup_time_string)?; + let group = BackupGroup::new(ty, id.into()); + Ok(Self { group, time }) + } + + pub fn ty(&self) -> BackupType { + self.group.ty + } + + pub fn id(&self) -> &str { + &self.group.id + } +} + +impl std::str::FromStr for BackupDir { + type Err = Error; + + /// Parse a snapshot path. + /// + /// This parses strings like `host/elsa/2020-06-15T05:18:33Z". + fn from_str(path: &str) -> Result { + let cap = SNAPSHOT_PATH_REGEX + .captures(path) + .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; + + BackupDir::with_rfc3339( + cap.get(1).unwrap().as_str().parse()?, + cap.get(2).unwrap().as_str(), + cap.get(3).unwrap().as_str(), + ) + } +} + +impl std::fmt::Display for BackupDir { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // FIXME: log error? + let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?; + write!(f, "{}/{}", self.group, time) + } +} + +#[api( + properties: { + "backup": { type: BackupDir }, comment: { schema: SINGLE_LINE_COMMENT_SCHEMA, optional: true, @@ -432,9 +662,8 @@ pub struct SnapshotVerifyState { #[serde(rename_all = "kebab-case")] /// Basic information about backup snapshot. 
pub struct SnapshotListItem { - pub backup_type: String, // enum - pub backup_id: String, - pub backup_time: i64, + #[serde(flatten)] + pub backup: BackupDir, /// The first line from manifest "notes" #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, @@ -459,15 +688,8 @@ pub struct SnapshotListItem { #[api( properties: { - "backup-type": { - schema: BACKUP_TYPE_SCHEMA, - }, - "backup-id": { - schema: BACKUP_ID_SCHEMA, - }, - "last-backup": { - schema: BACKUP_TIME_SCHEMA, - }, + "backup": { type: BackupGroup }, + "last-backup": { schema: BACKUP_TIME_SCHEMA }, "backup-count": { type: Integer, }, @@ -486,8 +708,9 @@ pub struct SnapshotListItem { #[serde(rename_all = "kebab-case")] /// Basic information about a backup group. pub struct GroupListItem { - pub backup_type: String, // enum - pub backup_id: String, + #[serde(flatten)] + pub backup: BackupGroup, + pub last_backup: i64, /// Number of contained snapshots pub backup_count: u64, @@ -503,24 +726,16 @@ pub struct GroupListItem { #[api( properties: { - "backup-type": { - schema: BACKUP_TYPE_SCHEMA, - }, - "backup-id": { - schema: BACKUP_ID_SCHEMA, - }, - "backup-time": { - schema: BACKUP_TIME_SCHEMA, - }, + "backup": { type: BackupDir }, }, )] #[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Prune result. 
pub struct PruneListItem { - pub backup_type: String, // enum - pub backup_id: String, - pub backup_time: i64, + #[serde(flatten)] + pub backup: BackupDir, + /// Keep snapshot pub keep: bool, } diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs index c90ebd0e..0b60eefa 100644 --- a/pbs-api-types/src/tape/mod.rs +++ b/pbs-api-types/src/tape/mod.rs @@ -27,7 +27,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema}; use proxmox_uuid::Uuid; -use crate::{BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, FINGERPRINT_SHA256_FORMAT}; +use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT}; const_regex! { pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$"); @@ -66,7 +66,7 @@ pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = optional: true, }, "backup-type": { - schema: BACKUP_TYPE_SCHEMA, + type: BackupType, optional: true, }, "backup-id": { @@ -83,6 +83,6 @@ pub struct MediaContentListFilter { pub label_text: Option, pub media: Option, pub media_set: Option, - pub backup_type: Option, + pub backup_type: Option, pub backup_id: Option, } From 027033c17a8ae483011e2a7c5859a7a9d9196566 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 20 Apr 2022 09:40:05 +0200 Subject: [PATCH 104/299] RemoteWithoutPassword: new API type To make it explicit that we do not return the password. 
Signed-off-by: Dietmar Maurer --- pbs-api-types/src/remote.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs index 1ebc9d4c..890e31c0 100644 --- a/pbs-api-types/src/remote.rs +++ b/pbs-api-types/src/remote.rs @@ -85,3 +85,22 @@ pub struct Remote { #[serde(flatten)] pub config: RemoteConfig, } + +#[api( + properties: { + name: { + schema: REMOTE_ID_SCHEMA, + }, + config: { + type: RemoteConfig, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Remote properties. +pub struct RemoteWithoutPassword { + pub name: String, + #[serde(flatten)] + pub config: RemoteConfig, +} From b12dc1e501763c4b52e80d003b56988a2715a8c5 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 20 Apr 2022 09:58:15 +0200 Subject: [PATCH 105/299] AuthId: derive Ord and PartialOrd So the we can sort... Signed-off-by: Dietmar Maurer --- pbs-api-types/src/userid.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 90dcd02e..ecbae9c2 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -327,7 +327,7 @@ impl PartialEq for &RealmRef { /// The token ID part of an API token authentication id. /// /// This alone does NOT uniquely identify the API token - use a full `Authid` for such use cases. -#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] +#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq, Deserialize, Serialize)] pub struct Tokenname(String); /// A reference to a token name part of an authentication id. 
This alone does NOT uniquely identify @@ -420,7 +420,7 @@ impl<'a> TryFrom<&'a str> for &'a TokennameRef { } /// A complete user id consisting of a user name and a realm -#[derive(Clone, Debug, PartialEq, Eq, Hash, UpdaterType)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd, UpdaterType)] pub struct Userid { data: String, name_len: usize, @@ -553,7 +553,7 @@ impl PartialEq for Userid { } /// A complete authentication id consisting of a user id and an optional token name. -#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType)] +#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType, Ord, PartialOrd)] pub struct Authid { user: Userid, tokenname: Option, From c4c67bdcfb5fbfa405ac4344c73891e5ee04b6d9 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 19 Apr 2022 12:11:16 +0200 Subject: [PATCH 106/299] api-types: datastore type improvements let BackupGroup implement Hash let BackupGroup and BackupDir be AsRef let BackupDir be AsRef the pbs-datastore types will implement these AsRefs as well Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 38 +++++++++++++++++++++++++++++++++- pbs-api-types/src/jobs.rs | 1 + 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 92579f61..de4d51a1 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -471,7 +471,7 @@ impl std::cmp::PartialOrd for BackupType { "backup-id": { schema: BACKUP_ID_SCHEMA }, }, )] -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// A backup group (without a data store). 
pub struct BackupGroup { @@ -488,6 +488,28 @@ impl BackupGroup { pub fn new>(ty: BackupType, id: T) -> Self { Self { ty, id: id.into() } } + + pub fn matches(&self, filter: &crate::GroupFilter) -> bool { + use crate::GroupFilter; + + match filter { + GroupFilter::Group(backup_group) => { + match backup_group.parse::() { + Ok(group) => *self == group, + Err(_) => false, // shouldn't happen if value is schema-checked + } + } + GroupFilter::BackupType(backup_type) => self.ty.as_str() == backup_type, + GroupFilter::Regex(regex) => regex.is_match(&self.to_string()), + } + } +} + +impl AsRef for BackupGroup { + #[inline] + fn as_ref(&self) -> &Self { + self + } } impl From<(BackupType, String)> for BackupGroup { @@ -568,6 +590,20 @@ pub struct BackupDir { pub time: i64, } +impl AsRef for BackupDir { + #[inline] + fn as_ref(&self) -> &BackupGroup { + &self.group + } +} + +impl AsRef for BackupDir { + #[inline] + fn as_ref(&self) -> &Self { + self + } +} + impl From<(BackupGroup, i64)> for BackupDir { fn from(data: (BackupGroup, i64)) -> Self { Self { diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 654c0477..e859e755 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -342,6 +342,7 @@ pub struct TapeBackupJobStatus { /// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`. pub enum GroupFilter { /// BackupGroup type - either `vm`, `ct`, or `host`. 
+ // FIXME: Should be `BackupType` BackupType(String), /// Full identifier of BackupGroup, including type Group(String), From cc652721306dba4943a8a433e6d024f2f390275f Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 20 Apr 2022 11:45:53 +0200 Subject: [PATCH 107/299] api-types: use BackupType for GroupFilter::BackupType instead of a string Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 2 +- pbs-api-types/src/jobs.rs | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index de4d51a1..7b988b93 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -499,7 +499,7 @@ impl BackupGroup { Err(_) => false, // shouldn't happen if value is schema-checked } } - GroupFilter::BackupType(backup_type) => self.ty.as_str() == backup_type, + GroupFilter::BackupType(ty) => self.ty == *ty, GroupFilter::Regex(regex) => regex.is_match(&self.to_string()), } } diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e859e755..6da6a1b2 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Authid, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, + Authid, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; @@ -342,8 +342,7 @@ pub struct TapeBackupJobStatus { /// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`. pub enum GroupFilter { /// BackupGroup type - either `vm`, `ct`, or `host`. 
- // FIXME: Should be `BackupType` - BackupType(String), + BackupType(BackupType), /// Full identifier of BackupGroup, including type Group(String), /// A regular expression matched against the full identifier of the BackupGroup @@ -356,7 +355,7 @@ impl std::str::FromStr for GroupFilter { fn from_str(s: &str) -> Result { match s.split_once(":") { Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())), - Some(("type", value)) => BACKUP_TYPE_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::BackupType(value.to_string())), + Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)), Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)), Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)), None => Err(format_err!("input doesn't match expected format '|regex:REGEX>'")), From f3d07e6f153037250c52fe39a8d3aeb70cea87f1 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 20 Apr 2022 15:06:28 +0200 Subject: [PATCH 108/299] api-types: DataStoreConfig::new for testing so our examples can more easily access a datastore without going over a configuration & cache Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 7b988b93..2bff64b5 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -316,6 +316,27 @@ pub struct DataStoreConfig { } impl DataStoreConfig { + pub fn new(name: String, path: String) -> Self { + Self { + name, + path, + comment: None, + gc_schedule: None, + prune_schedule: None, + keep_last: None, + keep_hourly: None, + keep_daily: None, + keep_weekly: None, + keep_monthly: None, + keep_yearly: None, + verify_new: None, + notify_user: None, + notify: None, + tuning: None, + maintenance_mode: None, + } + } + pub fn 
get_maintenance_mode(&self) -> Option { self.maintenance_mode .as_ref() From a63b50f79a6076883cceb94883a36960dfb82747 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Mon, 25 Apr 2022 11:48:14 +0200 Subject: [PATCH 109/299] api types: datastore status: reword doc comment of estimated_full_date Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 2bff64b5..bab4d19a 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -959,10 +959,9 @@ pub struct DataStoreStatusListItem { #[serde(skip_serializing_if = "Option::is_none")] pub history_delta: Option, /// Estimation of the UNIX epoch when the storage will be full. - /// This is calculated via a simple Linear Regression (Least - /// Squares) of RRD data of the last Month. Missing if there are - /// not enough data points yet. If the estimate lies in the past, - /// the usage is decreasing or not changing. + /// It's calculated via a simple Linear Regression (Least Squares) over the RRD data of the + /// last Month. Missing if not enough data points are available yet. An estimate in the past + /// means that usage is declining or not changing. 
#[serde(skip_serializing_if = "Option::is_none")] pub estimated_full_date: Option, /// An error description, for example, when the datastore could not be looked up From a4f552f73861eb45b8e41633f9d39676282b4a5f Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Tue, 26 Apr 2022 06:23:31 +0000 Subject: [PATCH 110/299] api2: DataStoreListItem add maintenance info Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index bab4d19a..df891cac 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -354,6 +354,11 @@ impl DataStoreConfig { optional: true, schema: SINGLE_LINE_COMMENT_SCHEMA, }, + maintenance: { + optional: true, + format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), + type: String, + } }, )] #[derive(Serialize, Deserialize)] @@ -362,6 +367,9 @@ impl DataStoreConfig { pub struct DataStoreListItem { pub store: String, pub comment: Option, + /// If the datastore is in maintenance mode, information about it + #[serde(skip_serializing_if = "Option::is_none")] + pub maintenance: Option, } #[api( From c2049bce7fd082148751cb3324ef5b69026cd2bd Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Fri, 29 Apr 2022 12:07:12 +0200 Subject: [PATCH 111/299] api: status: return gc-status again Returning the GC status was dropped by mistake in commit fdcb2694 ("datastore status: factor out api type DataStoreStatusListItem") As this is considered a breaking change which we also felt, due to the gc-status being used in the web interface for the datastore overview list (not the dashboard), re add it. 
Fixes: fdcb2694 Signed-off-by: Dominik Csapak [ T: add reference to breaking commit, reword message ] Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index df891cac..e0b900d9 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -975,6 +975,9 @@ pub struct DataStoreStatusListItem { /// An error description, for example, when the datastore could not be looked up #[serde(skip_serializing_if = "Option::is_none")] pub error: Option, + /// Status of last GC + #[serde(skip_serializing_if = "Option::is_none")] + pub gc_status: Option, } pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { From f64272b9484e8f264082ffe5a423d45bdd4db9fa Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Tue, 3 May 2022 12:39:47 +0200 Subject: [PATCH 112/299] api types: BackupType: add iter for enum Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index e0b900d9..0500e85c 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -439,6 +439,7 @@ pub enum BackupType { /// "Host" backups. Host, + // NOTE: if you add new types, don't forget to adapt the iter below! 
} impl BackupType { @@ -458,6 +459,10 @@ impl BackupType { BackupType::Vm => 2, } } + + pub const fn iter() -> &'static [BackupType] { + &[Self::Vm, Self::Ct, Self::Host] + } } impl fmt::Display for BackupType { From c38e22c93c91198a39b83fb2377035ad992cebe6 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 7 Apr 2021 12:28:50 +0200 Subject: [PATCH 113/299] api-types: add BackupNamespace type The idea is to have namespaces in a datastore to allow grouping and namespacing backups from different (but similar trusted) sources, e.g., different PVE clusters, geo sites, use-cases or company service-branches, without separating the underlying deduplication domain and thus blowing up data and (GC/verify) resource usage. To avoid namespace ID clashes with anything existing or future usecases use a intermediate `ns` level on *each* depth. The current implementation treats that as internal and thus hides that fact from the API, iow., the namespace path the users passes along or gets returned won't include the `ns` level, they do not matter there at all. 
The max-depth of 8 is chosen with the following in mind: - assume that end-users already are in a deeper level of a hierarchy, most often they'll start at level one or two, as the higher ones are used by the seller/admin to namespace different users/groups, so lower than four would be very limiting for a lot of target use cases - all the more, a PBS could be used as huge second level archive in a big company, so one could imagine a namespace structure like: /////// e.g.: /us/east-coast/dc12345/financial/report-storage/cassandra/ that's six levels that one can imagine for a reasonable use-case, leave some room for the ones harder to imagine ;-) - on the other hand, we do not want to allow unlimited levels as we have request parameter limits and deep nesting can create other issues as well (e.g., stack exhaustion), so doubling the minimum level of 4 (1st point) we got room to breath even for the more odd (or huge) use cases (2nd point) - a per-level length of 32 (-1 due to separator) is enough to use telling names, making lives of users and admin simpler, but not blowing up parameter total length with the max depth of 8 - 8 * 32 = 256 which is nice buffer size Much thanks for Wolfgang for all the great work on the type implementation and assisting greatly with the design. 
Co-authored-by: Wolfgang Bumiller Co-authored-by: Thomas Lamprecht Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/datastore.rs | 271 +++++++++++++++++++++++++++++++++ pbs-api-types/src/lib.rs | 8 + 3 files changed, 280 insertions(+) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 485aee8b..8f897d5d 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -12,6 +12,7 @@ lazy_static = "1.4" percent-encoding = "2.1" regex = "1.5.5" serde = { version = "1.0", features = ["derive"] } +serde_plain = "1" proxmox-lang = "1.0.0" proxmox-schema = { version = "1.2.1", features = [ "api-macro" ] } diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 0500e85c..8eb2ad62 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,4 +1,5 @@ use std::fmt; +use std::path::{Path, PathBuf}; use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; @@ -27,6 +28,8 @@ const_regex! 
{ pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); + pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$"); + pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!()); } @@ -43,6 +46,8 @@ pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX); +pub const BACKUP_NAMESPACE_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&BACKUP_NAMESPACE_REGEX); pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.") .format(&BACKUP_ID_FORMAT) @@ -64,6 +69,13 @@ pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group") .format(&BACKUP_GROUP_FORMAT) .schema(); +pub const MAX_NAMESPACE_DEPTH: usize = 8; +pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256 +pub const BACKUP_NAMESPACE_SCHEMA: Schema = StringSchema::new("Namespace.") + .format(&BACKUP_NAMESPACE_FORMAT) + .max_length(MAX_BACKUP_NAMESPACE_LENGTH) // 256 + .schema(); + pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) @@ -426,6 +438,265 @@ pub struct SnapshotVerifyState { pub state: VerifyState, } +/// A namespace provides a logical separation between backup groups from different domains +/// (cluster, sites, ...) where uniqueness cannot be guaranteed anymore. It allows users to share a +/// datastore (i.e., one deduplication domain (chunk store)) with multiple (trusted) sites and +/// allows to form a hierarchy, for easier management and avoiding clashes between backup_ids. +/// +/// NOTE: Namespaces are a logical boundary only, they do not provide a full secure separation as +/// the chunk store is still shared. So, users whom do not trust each other must not share a +/// datastore. 
+/// +/// Implementation note: The path a namespace resolves to is always prefixed with `/ns` to avoid +/// clashes with backup group IDs and future backup_types and to have a clean separation between +/// the namespace directories and the ones from a backup snapshot. +#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct BackupNamespace { + /// The namespace subdirectories without the `ns/` intermediate directories. + inner: Vec, + + /// Cache the total length for efficiency. + len: usize, +} + +impl BackupNamespace { + /// Returns a root namespace reference. + pub const fn root() -> Self { + Self { + inner: Vec::new(), + len: 0, + } + } + + /// True if this represents the root namespace. + pub fn is_root(&self) -> bool { + self.inner.is_empty() + } + + /// Try to parse a string into a namespace. + pub fn new(name: &str) -> Result { + let mut this = Self::root(); + for name in name.split('/') { + this.push(name.to_string())?; + } + Ok(this) + } + + /* + /// Try to parse a file system path (where each sub-namespace is separated by an `ns` + /// subdirectory) into a valid namespace. + pub fn from_path>(path: T) -> Result { + use std::path::Component; + + let mut this = Self::root(); + let mut next_is_ns = true; + for component in path.as_ref().components() { + match component { + Component::Normal(component) if next_is_ns => { + if component.to_str() != Some("ns") { + bail!("invalid component in path: {:?}", component); + } + next_is_ns = false; + } + Component::Normal(component) => { + this.push( + component + .to_str() + .ok_or_else(|| { + format_err!("invalid component in path: {:?}", component) + })? + .to_string(), + )?; + next_is_ns = true; + } + Component::RootDir => { + next_is_ns = true; + } + _ => bail!("invalid component in path: {:?}", component.as_os_str()), + } + } + + Ok(this) + } + */ + + /// Try to parse a file path string (where each sub-namespace is separated by an `ns` + /// subdirectory) into a valid namespace. 
+ pub fn from_path(mut path: &str) -> Result { + let mut this = Self::root(); + loop { + match path.strip_prefix("ns/") { + Some(next) => match next.find('/') { + Some(pos) => { + this.push(next[..pos].to_string())?; + path = &next[(pos + 1)..]; + } + None => { + this.push(next.to_string())?; + break; + } + }, + None if !path.is_empty() => { + bail!("invalid component in namespace path at {:?}", path); + } + None => break, + } + } + Ok(this) + } + + /// Create a new namespace directly from a vec. + /// + /// # Safety + /// + /// Invalid contents may lead to inaccessible backups. + pub unsafe fn from_vec_unchecked(components: Vec) -> Self { + let mut this = Self { + inner: components, + len: 0, + }; + this.recalculate_len(); + this + } + + /// Recalculate the length. + fn recalculate_len(&mut self) { + self.len = self.inner.len().max(1) - 1; // a slash between each component + for part in &self.inner { + self.len += part.len(); + } + } + + /// The hierarchical depth of the namespace, 0 means top-level. + pub fn depth(&self) -> usize { + self.inner.len() + } + + /// The logical name and ID of the namespace. + pub fn name(&self) -> String { + self.to_string() + } + + /// The actual relative backing path of the namespace on the datastore. + pub fn path(&self) -> PathBuf { + self.display_as_path().to_string().into() + } + + /// Get the current namespace length. + /// + /// This includes separating slashes, but does not include the `ns/` intermediate directories. + /// This is not the *path* length, but rather the length that would be produced via + /// `.to_string()`. + #[inline] + pub fn name_len(&self) -> usize { + self.len + } + + /// Get the current namespace path length. + /// + /// This includes the `ns/` subdirectory strings. + pub fn path_len(&self) -> usize { + self.name_len() + 3 * self.inner.len() + } + + /// Enter a sub-namespace. Fails if nesting would become too deep or the name too long. 
+ pub fn push(&mut self, subdir: String) -> Result<(), Error> { + if subdir.contains('/') { + bail!("namespace component contained a slash"); + } + + self.push_do(subdir) + } + + /// Assumes `subdir` already does not contain any slashes. + /// Performs remaining checks and updates the length. + fn push_do(&mut self, subdir: String) -> Result<(), Error> { + if self.depth() >= MAX_NAMESPACE_DEPTH { + bail!( + "namespace to deep, {} > max {}", + self.inner.len(), + MAX_NAMESPACE_DEPTH + ); + } + + if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH { + bail!("namespace length exceeded"); + } + + if !crate::PROXMOX_SAFE_ID_REGEX.is_match(&subdir) { + bail!("not a valid namespace component"); + } + + if !self.inner.is_empty() { + self.len += 1; // separating slash + } + self.len += subdir.len(); + self.inner.push(subdir); + Ok(()) + } + + /// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every + /// component. + fn display_as_path(&self) -> BackupNamespacePath { + BackupNamespacePath(self) + } + + /// Iterate over the subdirectories. + pub fn components(&self) -> impl Iterator + '_ { + self.inner.iter().map(String::as_str) + } +} + +impl fmt::Display for BackupNamespace { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use std::fmt::Write; + + let mut parts = self.inner.iter(); + if let Some(first) = parts.next() { + f.write_str(first)?; + } + for part in parts { + f.write_char('/')?; + f.write_str(part)?; + } + Ok(()) + } +} + +serde_plain::derive_deserialize_from_fromstr!(BackupNamespace, "valid backup namespace"); + +impl std::str::FromStr for BackupNamespace { + type Err = Error; + + fn from_str(name: &str) -> Result { + Self::new(name) + } +} + +serde_plain::derive_serialize_from_display!(BackupNamespace); + +impl ApiType for BackupNamespace { + const API_SCHEMA: Schema = BACKUP_NAMESPACE_SCHEMA; +} + +/// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`]. 
+/// +/// This implements [`Display`] such that it includes the `ns/` subdirectory prefix in front of +/// every component. +pub struct BackupNamespacePath<'a>(&'a BackupNamespace); + +impl fmt::Display for BackupNamespacePath<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut sep = "ns/"; + for part in &self.0.inner { + f.write_str(sep)?; + sep = "/ns/"; + f.write_str(part)?; + } + Ok(()) + } +} + #[api] /// Backup types. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index d121d26e..459a01f5 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -26,6 +26,14 @@ macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") } #[macro_export] macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") } +#[rustfmt::skip] +#[macro_export] +macro_rules! BACKUP_NS_RE { + () => ( + concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!()) + ); +} + #[rustfmt::skip] #[macro_export] macro_rules! SNAPSHOT_PATH_REGEX_STR { From 686c4cd2508df7be4196ea2b381b38e7266ad2a1 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 5 May 2022 17:15:31 +0200 Subject: [PATCH 114/299] ns: max depth: set constant to upper inclusive boundary makes usage a bit simpler, e.g., the api maximum can use that 1:1 then. 
Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 8eb2ad62..8c7ebad0 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -69,7 +69,10 @@ pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group") .format(&BACKUP_GROUP_FORMAT) .schema(); -pub const MAX_NAMESPACE_DEPTH: usize = 8; +/// The maximal, inclusive depth for namespaces from the root ns downwards +/// +/// The datastore root name space is at depth zero (0), so we have in total eight (8) levels +pub const MAX_NAMESPACE_DEPTH: usize = 7; pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256 pub const BACKUP_NAMESPACE_SCHEMA: Schema = StringSchema::new("Namespace.") .format(&BACKUP_NAMESPACE_FORMAT) @@ -611,7 +614,7 @@ impl BackupNamespace { /// Assumes `subdir` already does not contain any slashes. /// Performs remaining checks and updates the length. 
fn push_do(&mut self, subdir: String) -> Result<(), Error> { - if self.depth() >= MAX_NAMESPACE_DEPTH { + if self.depth() > MAX_NAMESPACE_DEPTH { bail!( "namespace to deep, {} > max {}", self.inner.len(), From 0af9b691462d88b18172ee59c593d82e6f9af651 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 21 Apr 2022 15:04:59 +0200 Subject: [PATCH 115/299] api-types: add namespace to BackupGroup Make it easier by adding an helper accepting either group or directory Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 125 ++++++++++++++++++++++++++------- pbs-api-types/src/lib.rs | 20 +++++- 2 files changed, 120 insertions(+), 25 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 8c7ebad0..b2ef001b 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; @@ -16,19 +16,24 @@ use crate::{ }; const_regex! 
{ + pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$"); + pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); - pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$"); + pub GROUP_PATH_REGEX = concat!( + r"^(", BACKUP_NS_PATH_RE!(), r")?", + r"(", BACKUP_TYPE_RE!(), ")/", + r"(", BACKUP_ID_RE!(), r")$", + ); pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$"; pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); - - pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$"); + pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$"); pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!()); } @@ -640,7 +645,7 @@ impl BackupNamespace { /// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every /// component. - fn display_as_path(&self) -> BackupNamespacePath { + pub fn display_as_path(&self) -> BackupNamespacePath { BackupNamespacePath(self) } @@ -775,6 +780,7 @@ impl std::cmp::PartialOrd for BackupType { #[api( properties: { + "backup-ns": { type: BackupNamespace }, "backup-type": { type: BackupType }, "backup-id": { schema: BACKUP_ID_SCHEMA }, }, @@ -783,6 +789,14 @@ impl std::cmp::PartialOrd for BackupType { #[serde(rename_all = "kebab-case")] /// A backup group (without a data store). pub struct BackupGroup { + /// An optional namespace this backup belongs to. + #[serde( + rename = "backup-ns", + skip_serializing_if = "BackupNamespace::is_root", + default + )] + pub ns: BackupNamespace, + /// Backup type. 
#[serde(rename = "backup-type")] pub ty: BackupType, @@ -793,8 +807,12 @@ pub struct BackupGroup { } impl BackupGroup { - pub fn new>(ty: BackupType, id: T) -> Self { - Self { ty, id: id.into() } + pub fn new>(ns: BackupNamespace, ty: BackupType, id: T) -> Self { + Self { + ns, + ty, + id: id.into(), + } } pub fn matches(&self, filter: &crate::GroupFilter) -> bool { @@ -820,21 +838,29 @@ impl AsRef for BackupGroup { } } -impl From<(BackupType, String)> for BackupGroup { - fn from(data: (BackupType, String)) -> Self { +impl From<(BackupNamespace, BackupType, String)> for BackupGroup { + #[inline] + fn from(data: (BackupNamespace, BackupType, String)) -> Self { Self { - ty: data.0, - id: data.1, + ns: data.0, + ty: data.1, + id: data.2, } } } impl std::cmp::Ord for BackupGroup { fn cmp(&self, other: &Self) -> std::cmp::Ordering { + let ns_order = self.ns.cmp(&other.ns); + if ns_order != std::cmp::Ordering::Equal { + return ns_order; + } + let type_order = self.ty.cmp(&other.ty); if type_order != std::cmp::Ordering::Equal { return type_order; } + // try to compare IDs numerically let id_self = self.id.parse::(); let id_other = other.id.parse::(); @@ -855,7 +881,11 @@ impl std::cmp::PartialOrd for BackupGroup { impl fmt::Display for BackupGroup { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.ty, self.id) + if self.ns.is_root() { + write!(f, "{}/{}", self.ty, self.id) + } else { + write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id) + } } } @@ -871,8 +901,9 @@ impl std::str::FromStr for BackupGroup { .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?; Ok(Self { - ty: cap.get(1).unwrap().as_str().parse()?, - id: cap.get(2).unwrap().as_str().to_owned(), + ns: BackupNamespace::from_path(cap.get(1).unwrap().as_str())?, + ty: cap.get(2).unwrap().as_str().parse()?, + id: cap.get(3).unwrap().as_str().to_owned(), }) } } @@ -921,32 +952,44 @@ impl From<(BackupGroup, i64)> for BackupDir { } } -impl 
From<(BackupType, String, i64)> for BackupDir { - fn from(data: (BackupType, String, i64)) -> Self { +impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir { + fn from(data: (BackupNamespace, BackupType, String, i64)) -> Self { Self { - group: (data.0, data.1).into(), - time: data.2, + group: (data.0, data.1, data.2).into(), + time: data.3, } } } impl BackupDir { - pub fn with_rfc3339(ty: BackupType, id: T, backup_time_string: &str) -> Result + pub fn with_rfc3339( + ns: BackupNamespace, + ty: BackupType, + id: T, + backup_time_string: &str, + ) -> Result where T: Into, { let time = proxmox_time::parse_rfc3339(&backup_time_string)?; - let group = BackupGroup::new(ty, id.into()); + let group = BackupGroup::new(ns, ty, id.into()); Ok(Self { group, time }) } + #[inline] pub fn ty(&self) -> BackupType { self.group.ty } + #[inline] pub fn id(&self) -> &str { &self.group.id } + + #[inline] + pub fn ns(&self) -> &BackupNamespace { + &self.group.ns + } } impl std::str::FromStr for BackupDir { @@ -960,22 +1003,56 @@ impl std::str::FromStr for BackupDir { .captures(path) .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; + let ns = match cap.get(1) { + Some(cap) => BackupNamespace::from_path(cap.as_str())?, + None => BackupNamespace::root(), + }; BackupDir::with_rfc3339( - cap.get(1).unwrap().as_str().parse()?, - cap.get(2).unwrap().as_str(), + ns, + cap.get(2).unwrap().as_str().parse()?, cap.get(3).unwrap().as_str(), + cap.get(4).unwrap().as_str(), ) } } -impl std::fmt::Display for BackupDir { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for BackupDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME: log error? let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?; write!(f, "{}/{}", self.group, time) } } +/// Used when both a backup group or a directory can be valid. 
+pub enum BackupPart { + Group(BackupGroup), + Dir(BackupDir), +} + +impl std::str::FromStr for BackupPart { + type Err = Error; + + /// Parse a path which can be either a backup group or a snapshot dir. + fn from_str(path: &str) -> Result { + let cap = GROUP_OR_SNAPSHOT_PATH_REGEX + .captures(path) + .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; + + let ns = match cap.get(1) { + Some(cap) => BackupNamespace::from_path(cap.as_str())?, + None => BackupNamespace::root(), + }; + let ty = cap.get(2).unwrap().as_str().parse()?; + let id = cap.get(3).unwrap().as_str().to_string(); + + Ok(match cap.get(4) { + Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?), + None => BackupPart::Group((ns, ty, id).into()), + }) + } +} + #[api( properties: { "backup": { type: BackupDir }, diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 459a01f5..4f40a27f 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -34,14 +34,32 @@ macro_rules! BACKUP_NS_RE { ); } +#[rustfmt::skip] +#[macro_export] +macro_rules! BACKUP_NS_PATH_RE { + () => ( + concat!(r"(:?ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!()) + ); +} + #[rustfmt::skip] #[macro_export] macro_rules! SNAPSHOT_PATH_REGEX_STR { () => ( - concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")") + concat!( + r"(", BACKUP_NS_PATH_RE!(), ")?", + r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")", + ) ); } +#[macro_export] +macro_rules! 
GROUP_OR_SNAPSHOT_PATH_REGEX_STR { + () => { + concat!(SNAPSHOT_PATH_REGEX_STR!(), "?") + }; +} + mod acl; pub use acl::*; From 1682d9ae0db7056cab3cd272fd63345f3f19b38c Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Sun, 24 Apr 2022 20:21:45 +0200 Subject: [PATCH 116/299] api types: namespace: include problematic component in error Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index b2ef001b..7b279084 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -632,7 +632,7 @@ impl BackupNamespace { } if !crate::PROXMOX_SAFE_ID_REGEX.is_match(&subdir) { - bail!("not a valid namespace component"); + bail!("not a valid namespace component: {subdir}"); } if !self.inner.is_empty() { From 0cd80471d057da043c1c7e386a0730f234cb70a0 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Sun, 24 Apr 2022 20:21:57 +0200 Subject: [PATCH 117/299] api types: namespace: add from_parent_ns helper will be used in the (recursive) namespace iterator Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 7b279084..40ae3d7a 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -553,6 +553,16 @@ impl BackupNamespace { Ok(this) } + /// Create a new Namespace attached to parent + /// + /// `name` must be a single level namespace ID, that is, no '/' is allowed. + /// This rule also avoids confusion about the name being a NS or NS-path + pub fn from_parent_ns(parent: &Self, name: String) -> Result { + let mut child = parent.to_owned(); + child.push(name)?; + Ok(child) + } + /// Create a new namespace directly from a vec. 
/// /// # Safety From 0deda0cacff70709252afb63a4f9fd2de4138252 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Mon, 25 Apr 2022 11:33:12 +0200 Subject: [PATCH 118/299] api-types: add missing slash in optional ns path regex Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- pbs-api-types/src/lib.rs | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 40ae3d7a..f78c7f26 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -25,7 +25,7 @@ const_regex! { pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); pub GROUP_PATH_REGEX = concat!( - r"^(", BACKUP_NS_PATH_RE!(), r")?", + r"^(", BACKUP_NS_PATH_RE!(), r"/)?", r"(", BACKUP_TYPE_RE!(), ")/", r"(", BACKUP_ID_RE!(), r")$", ); diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 4f40a27f..17eeb52b 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -47,16 +47,20 @@ macro_rules! BACKUP_NS_PATH_RE { macro_rules! SNAPSHOT_PATH_REGEX_STR { () => ( concat!( - r"(", BACKUP_NS_PATH_RE!(), ")?", + r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?", r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")", ) ); } +#[rustfmt::skip] #[macro_export] macro_rules! 
GROUP_OR_SNAPSHOT_PATH_REGEX_STR { () => { - concat!(SNAPSHOT_PATH_REGEX_STR!(), "?") + concat!( + r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?", + r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?", + ) }; } From 731d4783ce58ca61be9b8a1902f7fbccfd5456f3 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Mon, 25 Apr 2022 11:52:12 +0200 Subject: [PATCH 119/299] api-types: more regex fixups Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 17eeb52b..4f0c9203 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -30,7 +30,7 @@ macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9] #[macro_export] macro_rules! BACKUP_NS_RE { () => ( - concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!()) + concat!(r"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!()) ); } @@ -38,7 +38,7 @@ macro_rules! BACKUP_NS_RE { #[macro_export] macro_rules! 
BACKUP_NS_PATH_RE { () => ( - concat!(r"(:?ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!()) + concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!()) ); } From 2c593cd38c5dc13779892ce75afcdd0c65452a50 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 28 Apr 2022 09:37:10 +0200 Subject: [PATCH 120/299] api types: BackupNamespace add pop & parent helpers Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index f78c7f26..32558066 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -563,6 +563,25 @@ impl BackupNamespace { Ok(child) } + /// Pop one level off the namespace hierachy + pub fn pop(&mut self) { + if let Some(dropped) = self.inner.pop() { + self.len = self.len.saturating_sub(dropped.len() + 1); + } + } + + /// Get the namespace parent as owned BackupNamespace + pub fn parent(&self) -> Self { + if self.is_root() { + return Self::root(); + } + + let mut parent = self.clone(); + parent.pop(); + + parent + } + /// Create a new namespace directly from a vec. 
/// /// # Safety From e2cf8920ea17ca4d7db2bb4fd49ea5c62f8df577 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 29 Apr 2022 12:11:32 +0200 Subject: [PATCH 121/299] api-types: fixup backup-ns being optional Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 32558066..b75bcf60 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -809,7 +809,7 @@ impl std::cmp::PartialOrd for BackupType { #[api( properties: { - "backup-ns": { type: BackupNamespace }, + "backup-ns": { type: BackupNamespace, optional: true }, "backup-type": { type: BackupType }, "backup-id": { schema: BACKUP_ID_SCHEMA }, }, From 1b3a49c595cf399ae299fc3c6e5c0bc581f30d71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 29 Apr 2022 13:25:13 +0200 Subject: [PATCH 122/299] BackupNamespace: fix deserialize of root NS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabian Grünbichler Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index b75bcf60..5ca102a2 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -484,6 +484,11 @@ impl BackupNamespace { /// Try to parse a string into a namespace. 
pub fn new(name: &str) -> Result { let mut this = Self::root(); + + if name.is_empty() { + return Ok(this); + } + for name in name.split('/') { this.push(name.to_string())?; } From 1f35bbc4dcd6984e35e1fcbc2b3b0911cac2f3ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 29 Apr 2022 13:42:58 +0200 Subject: [PATCH 123/299] api: derive UpdaterType for BackupNamespace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabian Grünbichler Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 5ca102a2..55e4c152 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{ api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, - Schema, StringSchema, Updater, + Schema, StringSchema, Updater, UpdaterType, }; use crate::{ @@ -458,7 +458,7 @@ pub struct SnapshotVerifyState { /// Implementation note: The path a namespace resolves to is always prefixed with `/ns` to avoid /// clashes with backup group IDs and future backup_types and to have a clean separation between /// the namespace directories and the ones from a backup snapshot. -#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, UpdaterType)] pub struct BackupNamespace { /// The namespace subdirectories without the `ns/` intermediate directories. 
inner: Vec, From cf93fbb8935661865f60359bdcf29a7fe92cbe2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 29 Apr 2022 13:43:58 +0200 Subject: [PATCH 124/299] api: add prefix-mapping helper to BackupNamespace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit given a namespace, a source prefix and a target prefix this helper strips the source prefix and replaces it with the target one (erroring out if the prefix doesn't match). Signed-off-by: Fabian Grünbichler Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 55e4c152..1a68c356 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -687,6 +687,33 @@ impl BackupNamespace { pub fn components(&self) -> impl Iterator + '_ { self.inner.iter().map(String::as_str) } + + /// Map NS by replacing `source_prefix` with `target_prefix` + pub fn map_prefix( + &self, + source_prefix: &BackupNamespace, + target_prefix: &BackupNamespace, + ) -> Result { + let mut mapped = target_prefix.clone(); + let mut source_iter = source_prefix.components(); + let mut self_iter = self.components(); + + while let Some(comp) = self_iter.next() { + if let Some(source_comp) = source_iter.next() { + if source_comp != comp { + bail!( + "Failed to map namespace - {} is not a valid prefix of {}", + source_prefix, + self + ); + } + continue; + } + mapped.push(comp.to_owned())?; + } + + Ok(mapped) + } } impl fmt::Display for BackupNamespace { From 47d14e1aed40dfc2b6bc9c1e9cf30c828e563880 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 29 Apr 2022 13:48:54 +0200 Subject: [PATCH 125/299] api: add NS_MAX_DEPTH_SCHEMA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabian Grünbichler [ T: renamed from 
NAMESPACE_RECURSION_DEPTH_SCHEMA & moved to from jobs to datastore ] Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 1a68c356..010fcc6e 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -84,6 +84,13 @@ pub const BACKUP_NAMESPACE_SCHEMA: Schema = StringSchema::new("Namespace.") .max_length(MAX_BACKUP_NAMESPACE_LENGTH) // 256 .schema(); +pub const NS_MAX_DEPTH_SCHEMA: Schema = + IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion)") + .minimum(0) + .maximum(MAX_NAMESPACE_DEPTH as isize) + .default(0) + .schema(); + pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) From 1e0c87d48f38ff78247b079dcc42efe1a0231bb9 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Sun, 24 Apr 2022 20:24:42 +0200 Subject: [PATCH 126/299] api: namespace management endpoints allow to list any namespace with privileges on it and allow to create and delete namespaces if the user has modify permissions on the parent namespace. Creation is only allowed if the parent NS already exists. Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 010fcc6e..af60d435 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1213,6 +1213,22 @@ pub struct GroupListItem { pub comment: Option, } +#[api()] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Basic information about a backup namespace. +pub struct NamespaceListItem { + /// A backup namespace + pub ns: BackupNamespace, + + // TODO? 
+ //pub group_count: u64, + //pub ns_count: u64, + /// The first line from the namespace's "notes" + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, +} + #[api( properties: { "backup": { type: BackupDir }, @@ -1431,6 +1447,15 @@ pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType { .schema(), }; +pub const ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE: ReturnType = ReturnType { + optional: false, + schema: &ArraySchema::new( + "Returns the list of backup namespaces.", + &NamespaceListItem::API_SCHEMA, + ) + .schema(), +}; + pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType { optional: false, schema: &ArraySchema::new( From afcf4896bab2034528dcbd7dd4dd22f01a5061d0 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Mon, 9 May 2022 15:39:29 +0200 Subject: [PATCH 127/299] split the namespace out of BackupGroup/Dir api types We decided to go this route because it'll most likely be safer in the API as we need to explicitly add namespaces support to the various API endpoints this way. For example, 'pull' should have 2 namespaces: local and remote, and the GroupFilter (which would otherwise contain exactly *one* namespace parameter) needs to be applied for both sides (to decide what to pull from the remote, and what to *remove* locally as cleanup). The *datastore* types still contain the namespace and have a `.backup_ns()` getter. Note that the datastore's `Display` implementations are no longer safe to use as a deserializable string. Additionally, some datastore based methods now have been exposed via the BackupGroup/BackupDir types to avoid a "round trip" in code. 
Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 90 +++++++++------------------------- pbs-api-types/src/lib.rs | 10 ---- 2 files changed, 23 insertions(+), 77 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index af60d435..33216bcf 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -25,8 +25,7 @@ const_regex! { pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); pub GROUP_PATH_REGEX = concat!( - r"^(", BACKUP_NS_PATH_RE!(), r"/)?", - r"(", BACKUP_TYPE_RE!(), ")/", + r"^(", BACKUP_TYPE_RE!(), ")/", r"(", BACKUP_ID_RE!(), r")$", ); @@ -848,7 +847,6 @@ impl std::cmp::PartialOrd for BackupType { #[api( properties: { - "backup-ns": { type: BackupNamespace, optional: true }, "backup-type": { type: BackupType }, "backup-id": { schema: BACKUP_ID_SCHEMA }, }, @@ -857,14 +855,6 @@ impl std::cmp::PartialOrd for BackupType { #[serde(rename_all = "kebab-case")] /// A backup group (without a data store). pub struct BackupGroup { - /// An optional namespace this backup belongs to. - #[serde( - rename = "backup-ns", - skip_serializing_if = "BackupNamespace::is_root", - default - )] - pub ns: BackupNamespace, - /// Backup type. 
#[serde(rename = "backup-type")] pub ty: BackupType, @@ -875,12 +865,8 @@ pub struct BackupGroup { } impl BackupGroup { - pub fn new>(ns: BackupNamespace, ty: BackupType, id: T) -> Self { - Self { - ns, - ty, - id: id.into(), - } + pub fn new>(ty: BackupType, id: T) -> Self { + Self { ty, id: id.into() } } pub fn matches(&self, filter: &crate::GroupFilter) -> bool { @@ -906,24 +892,18 @@ impl AsRef for BackupGroup { } } -impl From<(BackupNamespace, BackupType, String)> for BackupGroup { +impl From<(BackupType, String)> for BackupGroup { #[inline] - fn from(data: (BackupNamespace, BackupType, String)) -> Self { + fn from(data: (BackupType, String)) -> Self { Self { - ns: data.0, - ty: data.1, - id: data.2, + ty: data.0, + id: data.1, } } } impl std::cmp::Ord for BackupGroup { fn cmp(&self, other: &Self) -> std::cmp::Ordering { - let ns_order = self.ns.cmp(&other.ns); - if ns_order != std::cmp::Ordering::Equal { - return ns_order; - } - let type_order = self.ty.cmp(&other.ty); if type_order != std::cmp::Ordering::Equal { return type_order; @@ -949,11 +929,7 @@ impl std::cmp::PartialOrd for BackupGroup { impl fmt::Display for BackupGroup { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.ns.is_root() { - write!(f, "{}/{}", self.ty, self.id) - } else { - write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id) - } + write!(f, "{}/{}", self.ty, self.id) } } @@ -969,9 +945,8 @@ impl std::str::FromStr for BackupGroup { .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?; Ok(Self { - ns: BackupNamespace::from_path(cap.get(1).unwrap().as_str())?, - ty: cap.get(2).unwrap().as_str().parse()?, - id: cap.get(3).unwrap().as_str().to_owned(), + ty: cap.get(1).unwrap().as_str().parse()?, + id: cap.get(2).unwrap().as_str().to_owned(), }) } } @@ -1020,27 +995,22 @@ impl From<(BackupGroup, i64)> for BackupDir { } } -impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir { - fn from(data: (BackupNamespace, BackupType, 
String, i64)) -> Self { +impl From<(BackupType, String, i64)> for BackupDir { + fn from(data: (BackupType, String, i64)) -> Self { Self { - group: (data.0, data.1, data.2).into(), - time: data.3, + group: (data.0, data.1).into(), + time: data.2, } } } impl BackupDir { - pub fn with_rfc3339( - ns: BackupNamespace, - ty: BackupType, - id: T, - backup_time_string: &str, - ) -> Result + pub fn with_rfc3339(ty: BackupType, id: T, backup_time_string: &str) -> Result where T: Into, { let time = proxmox_time::parse_rfc3339(&backup_time_string)?; - let group = BackupGroup::new(ns, ty, id.into()); + let group = BackupGroup::new(ty, id.into()); Ok(Self { group, time }) } @@ -1053,11 +1023,6 @@ impl BackupDir { pub fn id(&self) -> &str { &self.group.id } - - #[inline] - pub fn ns(&self) -> &BackupNamespace { - &self.group.ns - } } impl std::str::FromStr for BackupDir { @@ -1071,15 +1036,10 @@ impl std::str::FromStr for BackupDir { .captures(path) .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; - let ns = match cap.get(1) { - Some(cap) => BackupNamespace::from_path(cap.as_str())?, - None => BackupNamespace::root(), - }; BackupDir::with_rfc3339( - ns, - cap.get(2).unwrap().as_str().parse()?, + cap.get(1).unwrap().as_str().parse()?, + cap.get(2).unwrap().as_str(), cap.get(3).unwrap().as_str(), - cap.get(4).unwrap().as_str(), ) } } @@ -1107,16 +1067,12 @@ impl std::str::FromStr for BackupPart { .captures(path) .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; - let ns = match cap.get(1) { - Some(cap) => BackupNamespace::from_path(cap.as_str())?, - None => BackupNamespace::root(), - }; - let ty = cap.get(2).unwrap().as_str().parse()?; - let id = cap.get(3).unwrap().as_str().to_string(); + let ty = cap.get(1).unwrap().as_str().parse()?; + let id = cap.get(2).unwrap().as_str().to_string(); - Ok(match cap.get(4) { - Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?), - None => 
BackupPart::Group((ns, ty, id).into()), + Ok(match cap.get(3) { + Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?), + None => BackupPart::Group((ty, id).into()), }) } } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 4f0c9203..192acc71 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -34,20 +34,11 @@ macro_rules! BACKUP_NS_RE { ); } -#[rustfmt::skip] -#[macro_export] -macro_rules! BACKUP_NS_PATH_RE { - () => ( - concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!()) - ); -} - #[rustfmt::skip] #[macro_export] macro_rules! SNAPSHOT_PATH_REGEX_STR { () => ( concat!( - r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?", r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")", ) ); @@ -58,7 +49,6 @@ macro_rules! SNAPSHOT_PATH_REGEX_STR { macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR { () => { concat!( - r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?", r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?", ) }; From 586c9f468d762f86c06ee0aa82fbc3fef36a64c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 5 May 2022 13:47:26 +0200 Subject: [PATCH 128/299] api: namespace: return popped component MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit helpful for places where namespaces need to be (re)created Signed-off-by: Fabian Grünbichler Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 33216bcf..a6d6f625 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -575,10 +575,12 @@ impl BackupNamespace { } /// Pop one level off the namespace hierachy - pub fn pop(&mut self) { - if let Some(dropped) = self.inner.pop() { + pub fn pop(&mut self) -> Option { + let dropped = 
self.inner.pop(); + if let Some(ref dropped) = dropped { self.len = self.len.saturating_sub(dropped.len() + 1); } + dropped } /// Get the namespace parent as owned BackupNamespace From 21667bed72e5c878af23089536390cf36ab1bcf1 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 10 May 2022 14:40:34 +0200 Subject: [PATCH 129/299] api-types: allow empty namespace Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 192acc71..f604c892 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -30,7 +30,9 @@ macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9] #[macro_export] macro_rules! BACKUP_NS_RE { () => ( - concat!(r"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!()) + concat!("(?:", + "(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!(), + ")?") ); } From 77d4a752b6604f7b55be193f6dec286c893cfd80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 5 May 2022 10:58:26 +0200 Subject: [PATCH 130/299] api: add DatastoreWithNamespace helper struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabian Grünbichler Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index a6d6f625..6d846ae8 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1054,6 +1054,22 @@ impl fmt::Display for BackupDir { } } +/// Helper struct for places where sensible formatting of store+NS combo is required +pub struct DatastoreWithNamespace { + pub store: String, + pub ns: BackupNamespace, +} + +impl fmt::Display for DatastoreWithNamespace { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { + if self.ns.is_root() { + write!(f, "{}", self.store) + } else { + write!(f, "{}/{}", self.store, self.ns) + } + } +} + /// Used when both a backup group or a directory can be valid. pub enum BackupPart { Group(BackupGroup), From c358973e1b6aeb01c32dc032df88255ea302ee2c Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Tue, 10 May 2022 19:04:17 +0200 Subject: [PATCH 131/299] api: verify: support namespaces Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 6da6a1b2..46ae4fe2 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Authid, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, DATASTORE_SCHEMA, - DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, - SINGLE_LINE_COMMENT_SCHEMA, + Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, + PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; const_regex! 
{ @@ -182,6 +182,10 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = optional: true, schema: VERIFICATION_SCHEDULE_SCHEMA, }, + ns: { + optional: true, + schema: BACKUP_NAMESPACE_SCHEMA, + }, } )] #[derive(Serialize, Deserialize, Updater)] @@ -205,6 +209,9 @@ pub struct VerificationJobConfig { #[serde(skip_serializing_if = "Option::is_none")] /// when to schedule this job in calendar event notation pub schedule: Option, + #[serde(skip_serializing_if = "Option::is_none", default)] + /// on which backup namespace to run the verification recursively + pub ns: Option, } #[api( From e9413966786e7d7bfec1cf4a60d1fc4218ee7872 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 11 May 2022 09:43:10 +0200 Subject: [PATCH 132/299] verify job: support max-depth config Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 46ae4fe2..87009b3a 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -186,6 +186,10 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = optional: true, schema: BACKUP_NAMESPACE_SCHEMA, }, + "max-depth": { + optional: true, + schema: crate::NS_MAX_DEPTH_SCHEMA, + }, } )] #[derive(Serialize, Deserialize, Updater)] @@ -212,6 +216,10 @@ pub struct VerificationJobConfig { #[serde(skip_serializing_if = "Option::is_none", default)] /// on which backup namespace to run the verification recursively pub ns: Option, + #[serde(skip_serializing_if = "Option::is_none", default)] + /// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the + /// snapshots on the same level as the passed `ns`, or the datastore root if none. 
+ pub max_depth: Option, } #[api( From 8f21c992a743e0ae538fdc794fd81b8e274325d9 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 11 May 2022 12:26:25 +0200 Subject: [PATCH 133/299] api-types: rework BackupNamespace::map_prefix to use slice::strip_prefix() from std Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 6d846ae8..9a549fb0 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -702,25 +702,22 @@ impl BackupNamespace { source_prefix: &BackupNamespace, target_prefix: &BackupNamespace, ) -> Result { - let mut mapped = target_prefix.clone(); - let mut source_iter = source_prefix.components(); - let mut self_iter = self.components(); + let suffix = self + .inner + .strip_prefix(&source_prefix.inner[..]) + .ok_or_else(|| { + format_err!( + "Failed to map namespace - {} is not a valid prefix of {}", + source_prefix, + self + ) + })?; - while let Some(comp) = self_iter.next() { - if let Some(source_comp) = source_iter.next() { - if source_comp != comp { - bail!( - "Failed to map namespace - {} is not a valid prefix of {}", - source_prefix, - self - ); - } - continue; - } - mapped.push(comp.to_owned())?; + let mut new = target_prefix.clone(); + for item in suffix { + new.push(item.clone())?; } - - Ok(mapped) + Ok(new) } } From 718504f164f7c2224706cd279e775cdeaefd4582 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 29 Apr 2022 13:46:14 +0200 Subject: [PATCH 134/299] sync/pull: make namespace aware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow pulling all groups from a certain source namespace, and possibly sub namespaces until max-depth, into a target namespace. 
If any sub-namespaces get pulled, they will be mapped relatively from the source parent namespace to the target parent namespace. Signed-off-by: Fabian Grünbichler Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 17 +++++++++++++++-- pbs-api-types/src/jobs.rs | 24 +++++++++++++++++++++--- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 9a549fb0..0ef4240b 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1060,9 +1060,22 @@ pub struct DatastoreWithNamespace { impl fmt::Display for DatastoreWithNamespace { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.ns.is_root() { - write!(f, "{}", self.store) + write!(f, "datastore {}, root namespace", self.store) } else { - write!(f, "{}/{}", self.store, self.ns) + write!(f, "datastore '{}', namespace '{}'", self.store, self.ns) + } + } +} + +impl DatastoreWithNamespace { + pub fn acl_path(&self) -> Vec<&str> { + let mut path: Vec<&str> = vec!["datastore", &self.store]; + + if self.ns.is_root() { + path + } else { + path.extend(self.ns.inner.iter().map(|comp| comp.as_str())); + path } } } diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 87009b3a..368e60e3 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -9,15 +9,15 @@ use proxmox_schema::*; use crate::{ Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, - PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, + NS_MAX_DEPTH_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; const_regex! 
{ /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); - /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID' - pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); + /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID' + pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(?:", BACKUP_NS_RE!(), r"):"); } pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") @@ -413,6 +413,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = store: { schema: DATASTORE_SCHEMA, }, + ns: { + type: BackupNamespace, + optional: true, + }, "owner": { type: Authid, optional: true, @@ -423,10 +427,18 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = "remote-store": { schema: DATASTORE_SCHEMA, }, + "remote-ns": { + type: BackupNamespace, + optional: true, + }, "remove-vanished": { schema: REMOVE_VANISHED_BACKUPS_SCHEMA, optional: true, }, + "max-depth": { + schema: NS_MAX_DEPTH_SCHEMA, + optional: true, + }, comment: { optional: true, schema: SINGLE_LINE_COMMENT_SCHEMA, @@ -452,11 +464,17 @@ pub struct SyncJobConfig { pub id: String, pub store: String, #[serde(skip_serializing_if = "Option::is_none")] + pub ns: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, pub remote: String, pub remote_store: String, #[serde(skip_serializing_if = "Option::is_none")] + pub remote_ns: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub remove_vanished: Option, + #[serde(default)] + pub max_depth: usize, #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, #[serde(skip_serializing_if = "Option::is_none")] From 51e1d3c1fd9f363b9403e80fae6325b9046d7f53 Mon Sep 17 
00:00:00 2001 From: Thomas Lamprecht Date: Thu, 12 May 2022 11:31:07 +0200 Subject: [PATCH 135/299] datastore: add new Lookup for operations tracking We sometimes need to do some in-memory only stuff, e.g., to check if GC is already running for a datastore, which is a try_lock on a mutex that is in-memory. Actually the whole thing would be nicer if we could guarantee to hold the correct contract statically, e.g., like https://docs.rust-embedded.org/book/static-guarantees/design-contracts.html Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/maintenance.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index dd3de50a..2102cf2c 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -20,8 +20,17 @@ pub const MAINTENANCE_MESSAGE_SCHEMA: Schema = #[derive(Clone, Copy, Debug)] /// Operation requirements, used when checking for maintenance mode. pub enum Operation { + /// for any read operation like backup restore or RRD metric collection Read, + /// for any write/delete operation, like backup create or GC Write, + /// for any purely logical operation on the in-memory state of the datastore, e.g., to check if + /// some mutex could be locked (e.g., GC already running?) + /// + /// NOTE: one must *not* do any IO operations when only helding this Op state + Lookup, + + // GarbageCollect or Delete? } #[api] @@ -29,6 +38,12 @@ pub enum Operation { #[serde(rename_all = "kebab-case")] /// Maintenance type. pub enum MaintenanceType { + // TODO: + // - Add "unmounting" once we got pluggable datastores + // - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate + // operation, so that one can enable a mode where nothing new can be added but stuff can be + // cleaned + /// Only read operations are allowed on the datastore. ReadOnly, /// Neither read nor write operations are allowed on the datastore. 
@@ -65,7 +80,9 @@ impl MaintenanceMode { .decode_utf8() .unwrap_or(Cow::Borrowed("")); - if self.ty == MaintenanceType::Offline { + if let Some(Operation::Lookup) = operation { + return Ok(()); + } else if self.ty == MaintenanceType::Offline { bail!("offline maintenance mode: {}", message); } else if self.ty == MaintenanceType::ReadOnly { if let Some(Operation::Write) = operation { From 92984a159d3de8a674eac314d7f076b81e18893d Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 12 May 2022 11:54:21 +0200 Subject: [PATCH 136/299] cargo fmt Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/maintenance.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 2102cf2c..5bbba043 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -29,7 +29,6 @@ pub enum Operation { /// /// NOTE: one must *not* do any IO operations when only helding this Op state Lookup, - // GarbageCollect or Delete? } @@ -43,7 +42,6 @@ pub enum MaintenanceType { // - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate // operation, so that one can enable a mode where nothing new can be added but stuff can be // cleaned - /// Only read operations are allowed on the datastore. ReadOnly, /// Neither read nor write operations are allowed on the datastore. From 37c6fdafd1182b1f41979dadd99acebc28635f08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 12 May 2022 16:44:52 +0200 Subject: [PATCH 137/299] pull/sync: treat unset max-depth as full recursion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit to be consistent with tape backup and verification jobs. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/jobs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 368e60e3..56d87e2b 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -473,8 +473,8 @@ pub struct SyncJobConfig { pub remote_ns: Option, #[serde(skip_serializing_if = "Option::is_none")] pub remove_vanished: Option, - #[serde(default)] - pub max_depth: usize, + #[serde(skip_serializing_if = "Option::is_none")] + pub max_depth: Option, #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, #[serde(skip_serializing_if = "Option::is_none")] From 79dae5df9d4cdd7040a5e3c0dc2c0b6f187f0308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 13 May 2022 11:25:01 +0200 Subject: [PATCH 138/299] namespaces: move max-depth check to api type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and use it when creating a sync job, and simplify the check on updating (only check the final, resulting config instead of each intermediate version). 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 0ef4240b..47813cbd 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -719,6 +719,17 @@ impl BackupNamespace { } Ok(new) } + + /// Check whether adding `depth` levels of sub-namespaces exceeds the max depth limit + pub fn check_max_depth(&self, depth: usize) -> Result<(), Error> { + let ns_depth = self.depth(); + if ns_depth + depth > MAX_NAMESPACE_DEPTH { + bail!( + "namespace '{self}'s depth and recursion depth exceed limit: {ns_depth} + {depth} > {MAX_NAMESPACE_DEPTH}", + ); + } + Ok(()) + } } impl fmt::Display for BackupNamespace { From ed3dd6644e6b81cdba4cf5eab90ebb15ab9b05a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 13 May 2022 11:26:55 +0200 Subject: [PATCH 139/299] api: split max-depth schema/types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit into the regular one (with default == MAX) and the one used for pull/sync, where the default is 'None' which actually means the remote end reduces the scope of sync automatically (or, if needed, backwards-compat mode without any remote namespaces at all). 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 6 ++++++ pbs-api-types/src/jobs.rs | 5 +++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 47813cbd..1212ff89 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -90,6 +90,12 @@ pub const NS_MAX_DEPTH_SCHEMA: Schema = .default(0) .schema(); +pub const NS_MAX_DEPTH_REDUCED_SCHEMA: Schema = +IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion, empty == automatic full recursion, namespace depths reduce maximum allowed value)") + .minimum(0) + .maximum(MAX_NAMESPACE_DEPTH as isize) + .schema(); + pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 56d87e2b..2df19cfb 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -9,7 +9,8 @@ use proxmox_schema::*; use crate::{ Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, - NS_MAX_DEPTH_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, + NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, + SINGLE_LINE_COMMENT_SCHEMA, }; const_regex! 
{ @@ -436,7 +437,7 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = optional: true, }, "max-depth": { - schema: NS_MAX_DEPTH_SCHEMA, + schema: NS_MAX_DEPTH_REDUCED_SCHEMA, optional: true, }, comment: { From f3b18f723308437d7d124b6c368dad7207a20055 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Fri, 13 May 2022 11:41:35 +0200 Subject: [PATCH 140/299] api types: set NS_MAX_DEPTH schema default to MAX_NAMESPACE_DEPTH Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 1212ff89..f88a933e 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -87,7 +87,7 @@ pub const NS_MAX_DEPTH_SCHEMA: Schema = IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion)") .minimum(0) .maximum(MAX_NAMESPACE_DEPTH as isize) - .default(0) + .default(MAX_NAMESPACE_DEPTH as isize) .schema(); pub const NS_MAX_DEPTH_REDUCED_SCHEMA: Schema = From 26604f31a55bfbca9e973df96c188edc0770fe2b Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Fri, 13 May 2022 12:41:45 +0200 Subject: [PATCH 141/299] datastore: inline some format variables Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index f88a933e..58c26a87 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -713,9 +713,7 @@ impl BackupNamespace { .strip_prefix(&source_prefix.inner[..]) .ok_or_else(|| { format_err!( - "Failed to map namespace - {} is not a valid prefix of {}", - source_prefix, - self + "Failed to map namespace - {source_prefix} is not a valid prefix of {self}", ) })?; From 559017748c77a8a1cf1a5ca0ee4a4415aecf40e0 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 10 May 2022 16:06:41 +0200 Subject: [PATCH 142/299] 
pbs-api-types: add parse and print ns_and_snapshot these are helpers for the few cases where we want to print and parse from a format that has the namespace and snapshot combined, like for the on-tape catalog and snapshot archive. Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 58c26a87..95ef7120 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1463,3 +1463,26 @@ pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType { ) .schema(), }; + +/// Parse snapshots in the form 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z' +/// into a [`BackupNamespace`] and [`BackupDir`] +pub fn parse_ns_and_snapshot(input: &str) -> Result<(BackupNamespace, BackupDir), Error> { + match input.rmatch_indices('/').nth(2) { + Some((idx, _)) => { + let ns = BackupNamespace::from_path(&input[..idx])?; + let dir: BackupDir = (&input[idx + 1..]).parse()?; + Ok((ns, dir)) + } + None => Ok((BackupNamespace::root(), input.parse()?)), + } +} + +/// Prints a [`BackupNamespace`] and [`BackupDir`] in the form of +/// 'ns/foo/bar/ct/100/1970-01-01T00:00:00Z' +pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String { + if ns.is_root() { + dir.to_string() + } else { + format!("{}/{}", ns.display_as_path(), dir) + } +} From 6cbbb57ca72ecffa18d07ce0a5f1dc3fab907366 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 3 May 2022 08:06:34 +0200 Subject: [PATCH 143/299] tape: add namespaces/recursion depth to tape backup jobs and manual api via TapeBackupJobSetup Signed-off-by: Dominik Csapak --- pbs-api-types/src/jobs.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 2df19cfb..de546d5e 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -277,6 +277,14 @@ pub struct 
VerificationJobStatus { schema: GROUP_FILTER_LIST_SCHEMA, optional: true, }, + ns: { + type: BackupNamespace, + optional: true, + }, + "recursion-depth": { + schema: crate::NS_MAX_DEPTH_SCHEMA, + optional: true, + }, } )] #[derive(Serialize, Deserialize, Clone, Updater)] @@ -297,6 +305,10 @@ pub struct TapeBackupJobSetup { pub notify_user: Option, #[serde(skip_serializing_if = "Option::is_none")] pub group_filter: Option>, + #[serde(skip_serializing_if = "Option::is_none", default)] + pub ns: Option, + #[serde(skip_serializing_if = "Option::is_none", default)] + pub recursion_depth: Option, } #[api( From cb71f94110085237861d3ce2c415176ee0b293f5 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 5 May 2022 14:12:36 +0200 Subject: [PATCH 144/299] tape: add namespaces mapping type and the relevant parser for it Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 95ef7120..88179832 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1464,6 +1464,39 @@ pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType { .schema(), }; +#[api( + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + "max-depth": { + schema: NS_MAX_DEPTH_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// A namespace mapping +pub struct TapeRestoreNamespace { + /// The source datastore + pub store: String, + /// The source namespace. Root namespace if omitted. 
+ pub source: Option, + /// The target namespace, + #[serde(skip_serializing_if = "Option::is_none")] + pub target: Option, + /// The (optional) recursion depth + #[serde(skip_serializing_if = "Option::is_none")] + pub max_depth: Option, +} + +pub const TAPE_RESTORE_NAMESPACE_SCHEMA: Schema = StringSchema::new("A namespace mapping") + .format(&ApiStringFormat::PropertyString( + &TapeRestoreNamespace::API_SCHEMA, + )) + .schema(); + /// Parse snapshots in the form 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z' /// into a [`BackupNamespace`] and [`BackupDir`] pub fn parse_ns_and_snapshot(input: &str) -> Result<(BackupNamespace, BackupDir), Error> { From f6950c3ca944c5256b70b558df2ec0cde2bb8016 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 5 May 2022 15:59:36 +0200 Subject: [PATCH 145/299] api: tape/restore: add namespace mapping by adding a new parameter 'namespaces', which contains a mapping for a namespace like this: store=datastore,source=foo,target=bar,max-depth=2 if source or target are omitted the root namespace is used for its value this mapping can be given several times (on the cli) or as an array (via api) to have mappings for multiple datastores if a specific snapshot list is given simultaneously, the given snapshots will be restored according to this mapping, or to the source namespace if no mapping was found. to do this, we reutilize the restore_list_worker, but change it so that it does not hold a lock for the duration of the restore, but fails if the snapshot does exist at the end. also the snapshot will now be temporarily restored into the target datastore into the '.tmp/' folder. Signed-off-by: Dominik Csapak --- pbs-api-types/src/lib.rs | 8 ++++++++ pbs-api-types/src/tape/mod.rs | 6 +++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index f604c892..d9c8cee1 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -36,6 +36,14 @@ macro_rules! 
BACKUP_NS_RE { ); } +#[rustfmt::skip] +#[macro_export] +macro_rules! BACKUP_NS_PATH_RE { + () => ( + concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/") + ); +} + #[rustfmt::skip] #[macro_export] macro_rules! SNAPSHOT_PATH_REGEX_STR { diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs index 0b60eefa..747b0bcd 100644 --- a/pbs-api-types/src/tape/mod.rs +++ b/pbs-api-types/src/tape/mod.rs @@ -30,7 +30,7 @@ use proxmox_uuid::Uuid; use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT}; const_regex! { - pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$"); + pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":(:?", BACKUP_NS_PATH_RE!(),")?", SNAPSHOT_PATH_REGEX_STR!(), r"$"); } pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat = @@ -42,9 +42,9 @@ pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = .schema(); pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = - StringSchema::new("A snapshot in the format: 'store:type/id/time") + StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time") .format(&TAPE_RESTORE_SNAPSHOT_FORMAT) - .type_text("store:type/id/time") + .type_text("store:[ns/namespace/...]type/id/time") .schema(); #[api( From e3ad1c7f152564ee8e06ab8da273277ed163aac8 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Fri, 13 May 2022 09:06:11 +0200 Subject: [PATCH 146/299] api: tape/backup: fix namespace/max-depth parameters by adding the 'default' serde hint and renaming 'recursion_depth' to 'max_depth' (to be in line with sync job config) also add the logic to actually add/update the tape backup job config Signed-off-by: Dominik Csapak --- pbs-api-types/src/jobs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index de546d5e..885b10d4 100644 --- 
a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -281,7 +281,7 @@ pub struct VerificationJobStatus { type: BackupNamespace, optional: true, }, - "recursion-depth": { + "max-depth": { schema: crate::NS_MAX_DEPTH_SCHEMA, optional: true, }, @@ -308,7 +308,7 @@ pub struct TapeBackupJobSetup { #[serde(skip_serializing_if = "Option::is_none", default)] pub ns: Option, #[serde(skip_serializing_if = "Option::is_none", default)] - pub recursion_depth: Option, + pub max_depth: Option, } #[api( From 1d7f4ad0aa259738f19139433be63e81f04f50fc Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Mon, 16 May 2022 09:13:00 +0200 Subject: [PATCH 147/299] api types: BackupNamespace: remove unused, commented out code Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 38 ---------------------------------- 1 file changed, 38 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 88179832..4ccdaaa8 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -507,44 +507,6 @@ impl BackupNamespace { Ok(this) } - /* - /// Try to parse a file system path (where each sub-namespace is separated by an `ns` - /// subdirectory) into a valid namespace. - pub fn from_path>(path: T) -> Result { - use std::path::Component; - - let mut this = Self::root(); - let mut next_is_ns = true; - for component in path.as_ref().components() { - match component { - Component::Normal(component) if next_is_ns => { - if component.to_str() != Some("ns") { - bail!("invalid component in path: {:?}", component); - } - next_is_ns = false; - } - Component::Normal(component) => { - this.push( - component - .to_str() - .ok_or_else(|| { - format_err!("invalid component in path: {:?}", component) - })? 
- .to_string(), - )?; - next_is_ns = true; - } - Component::RootDir => { - next_is_ns = true; - } - _ => bail!("invalid component in path: {:?}", component.as_os_str()), - } - } - - Ok(this) - } - */ - /// Try to parse a file path string (where each sub-namespace is separated by an `ns` /// subdirectory) into a valid namespace. pub fn from_path(mut path: &str) -> Result { From b724d44153b0d520607be690e3f38cc1531ed681 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Mon, 16 May 2022 09:16:46 +0200 Subject: [PATCH 148/299] api types: BackupNamespace: fix depth check on pushing subdir to ns Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 4ccdaaa8..a3435c38 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -629,12 +629,10 @@ impl BackupNamespace { /// Assumes `subdir` already does not contain any slashes. /// Performs remaining checks and updates the length. 
fn push_do(&mut self, subdir: String) -> Result<(), Error> { - if self.depth() > MAX_NAMESPACE_DEPTH { - bail!( - "namespace to deep, {} > max {}", - self.inner.len(), - MAX_NAMESPACE_DEPTH - ); + let depth = self.depth(); + // check for greater equal to account for the to be added subdir + if depth >= MAX_NAMESPACE_DEPTH { + bail!("namespace to deep, {depth} >= max {MAX_NAMESPACE_DEPTH}"); } if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH { From 988e614129f10c080372239b037ea3c07a150d29 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Mon, 16 May 2022 09:50:10 +0200 Subject: [PATCH 149/299] api types: namespace: fix typo in error message Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index a3435c38..e2bf70aa 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -632,7 +632,7 @@ impl BackupNamespace { let depth = self.depth(); // check for greater equal to account for the to be added subdir if depth >= MAX_NAMESPACE_DEPTH { - bail!("namespace to deep, {depth} >= max {MAX_NAMESPACE_DEPTH}"); + bail!("namespace too deep, {depth} >= max {MAX_NAMESPACE_DEPTH}"); } if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH { From d1fc9d87fabb047bf85c1d957637f7aaf54bb4fa Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 18 May 2022 12:51:07 +0200 Subject: [PATCH 150/299] Revert "verify: allow '0' days for reverification" This reverts commit c72fe7d77c560ad7699dd43d80dbfb98bded4d51. We could already cause the behavior by simply setting ignore-verified to false, as that flag is basically an on/off switch for even considering outdated-after or not. So avoid the extra logic and just make the gui use the previously existing way. 
Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 885b10d4..0477f9b8 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -156,7 +156,7 @@ pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new("Days after that a verification becomes outdated. (0 means always)") - .minimum(0) + .minimum(1) .schema(); #[api( From 680d3914904926a6e1dd5886e6d6494af81595a3 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 18 May 2022 15:39:57 +0200 Subject: [PATCH 151/299] api types: verify job: allow outdated-after == 0 for backward compat We can have those in existing verify jobs configs, and that'd break stuff. So, even while the "bad" commit got released only recently with `2.1.6-1` (14 April 2022), we still need to cope with those that used it, and using some serde parser magic to transform on read only is hard here due to section config (json-value and verify currently happen before we can do anything about it) Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 0477f9b8..1fda49c5 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -155,8 +155,8 @@ pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( .schema(); pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = - IntegerSchema::new("Days after that a verification becomes outdated. 
(0 is deprecated)'") + .minimum(0) .schema(); #[api( From 9086f422afecee0797818ce0d5a8e1aae9b541ae Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 18 May 2022 15:45:55 +0200 Subject: [PATCH 152/299] api types: verify job: fix doc comment typo Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 1fda49c5..ed3d5a52 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -200,7 +200,7 @@ pub struct VerificationJobConfig { /// unique ID to address this job #[updater(skip)] pub id: String, - /// the datastore ID this verificaiton job affects + /// the datastore ID this verification job affects pub store: String, #[serde(skip_serializing_if = "Option::is_none")] /// if not set to false, check the age of the last snapshot verification to filter From 5925004a7de0de8ec73503d30c571c2f1117b5d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 24 May 2022 11:03:57 +0200 Subject: [PATCH 153/299] sync job: fix worker ID parsing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit the namespace is optional, but should be captured to allow ACL checks for unprivileged non-job-owners. also add FIXME for other job types and workers that (might) need updating. Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/jobs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index ed3d5a52..71bd6789 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -18,7 +18,7 @@ const_regex! 
{ /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID' - pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(?:", BACKUP_NS_RE!(), r"):"); + pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:"); } pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") From bb0fdee898008dc4137568ff843b00ddb9d5c114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 24 May 2022 14:58:47 +0200 Subject: [PATCH 154/299] sync job: don't require privs on datastore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit syncing to a namespace only requires privileges on the namespace (and potentially its children during execution). 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/jobs.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 71bd6789..5dbfe49c 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, - BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, - NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, + Authid, BackupNamespace, BackupType, DatastoreWithNamespace, RateLimitConfig, Userid, + BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, + MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; @@ -498,6 +498,15 @@ pub struct SyncJobConfig { pub limit: RateLimitConfig, } +impl SyncJobConfig { + pub fn store_with_ns(&self) -> DatastoreWithNamespace { + DatastoreWithNamespace { + store: self.store.clone(), + ns: self.ns.clone().unwrap_or_default(), + } + } +} + #[api( properties: { config: { From d09aadee8444108337cc4740f306e01cc6430b5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 24 May 2022 12:07:07 +0200 Subject: [PATCH 155/299] verify_job: fix priv check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/jobs.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 5dbfe49c..d3739315 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -223,6 +223,15 @@ pub struct VerificationJobConfig { pub max_depth: Option, } +impl VerificationJobConfig { + pub fn store_with_ns(&self) -> DatastoreWithNamespace { + DatastoreWithNamespace { + 
store: self.store.clone(), + ns: self.ns.clone().unwrap_or_default(), + } + } +} + #[api( properties: { config: { From bbfbd9297fae30ffa74579466de86a5dc64db620 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 24 May 2022 10:31:54 +0200 Subject: [PATCH 156/299] api: add new priv to priv name helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit for usage in permission check error messages, to allow easily indicating which privs are missing. Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/acl.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index 6cdd0ee6..3142f6fc 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -73,6 +73,17 @@ constnamedbitmap! { } } +pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> { + PRIVILEGES + .iter() + .fold(Vec::new(), |mut priv_names, (name, value)| { + if value & privs != 0 { + priv_names.push(name); + } + priv_names + }) +} + /// Admin always has all privileges. It can do everything except a few actions /// which are limited to the 'root@pam` superuser pub const ROLE_ADMIN: u64 = u64::MAX; From f6a37f40f69a423c8f067383ab611995cd2a311e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Wed, 25 May 2022 15:14:56 +0200 Subject: [PATCH 157/299] tree-wide: remove DatastoreWithNamespace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit instead move the acl_path helper to BackupNamespace, and introduce a new helper for printing a store+ns when logging/generating error messages. 
Suggested-by: Thomas Lamprecht Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 49 ++++++++++++++-------------------- pbs-api-types/src/jobs.rs | 22 +++++++-------- 2 files changed, 31 insertions(+), 40 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index e2bf70aa..88724c3e 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -694,6 +694,17 @@ impl BackupNamespace { } Ok(()) } + + pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { + let mut path: Vec<&str> = vec!["datastore", store]; + + if self.is_root() { + path + } else { + path.extend(self.inner.iter().map(|comp| comp.as_str())); + path + } + } } impl fmt::Display for BackupNamespace { @@ -1026,35 +1037,6 @@ impl fmt::Display for BackupDir { } } -/// Helper struct for places where sensible formatting of store+NS combo is required -pub struct DatastoreWithNamespace { - pub store: String, - pub ns: BackupNamespace, -} - -impl fmt::Display for DatastoreWithNamespace { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.ns.is_root() { - write!(f, "datastore {}, root namespace", self.store) - } else { - write!(f, "datastore '{}', namespace '{}'", self.store, self.ns) - } - } -} - -impl DatastoreWithNamespace { - pub fn acl_path(&self) -> Vec<&str> { - let mut path: Vec<&str> = vec!["datastore", &self.store]; - - if self.ns.is_root() { - path - } else { - path.extend(self.ns.inner.iter().map(|comp| comp.as_str())); - path - } - } -} - /// Used when both a backup group or a directory can be valid. pub enum BackupPart { Group(BackupGroup), @@ -1479,3 +1461,12 @@ pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String { format!("{}/{}", ns.display_as_path(), dir) } } + +/// Prints a Datastore name and [`BackupNamespace`] for logs/errors. 
+pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { + if ns.is_root() { + format!("datastore '{}', root namespace", store) + } else { + format!("datastore '{}', namespace '{}'", store, ns) + } +} diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index d3739315..c65a6085 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Authid, BackupNamespace, BackupType, DatastoreWithNamespace, RateLimitConfig, Userid, - BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, - MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, + Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, + NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; @@ -224,10 +224,10 @@ pub struct VerificationJobConfig { } impl VerificationJobConfig { - pub fn store_with_ns(&self) -> DatastoreWithNamespace { - DatastoreWithNamespace { - store: self.store.clone(), - ns: self.ns.clone().unwrap_or_default(), + pub fn acl_path(&self) -> Vec<&str> { + match self.ns.as_ref() { + Some(ns) => ns.acl_path(&self.store), + None => vec!["datastore", &self.store], } } } @@ -508,10 +508,10 @@ pub struct SyncJobConfig { } impl SyncJobConfig { - pub fn store_with_ns(&self) -> DatastoreWithNamespace { - DatastoreWithNamespace { - store: self.store.clone(), - ns: self.ns.clone().unwrap_or_default(), + pub fn acl_path(&self) -> Vec<&str> { + match self.ns.as_ref() { + Some(ns) => ns.acl_path(&self.store), + None => vec!["datastore", &self.store], } } } From 79cf434a797b3f57c1d3b73602b6f681bee1ea7c Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 26 May 2022 13:33:58 +0200 Subject: [PATCH 158/299] datastore status: impl empty-status 
constructor for item type we can now use it for the error case and will further use it for the can access namespace but not datastore case in a future patch Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 88724c3e..c96521cb 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1361,6 +1361,23 @@ pub struct DataStoreStatusListItem { pub gc_status: Option, } +impl DataStoreStatusListItem { + pub fn empty(store: &str, err: Option) -> Self { + DataStoreStatusListItem { + store: store.to_owned(), + total: -1, + used: -1, + avail: -1, + history: None, + history_start: None, + history_delta: None, + estimated_full_date: None, + error: err, + gc_status: None, + } + } +} + pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType { optional: false, schema: &ArraySchema::new( From 27718f2a72ff6f188f509556abbf28663ac16680 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 19 May 2022 09:43:09 +0200 Subject: [PATCH 159/299] api-types: add PruneJobConfig Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 68 ++++-------- pbs-api-types/src/jobs.rs | 182 +++++++++++++++++++++++++++++++++ 2 files changed, 204 insertions(+), 46 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index c96521cb..9331a8a5 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -157,52 +157,6 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .minimum(1) .schema(); -#[api( - properties: { - "keep-last": { - schema: PRUNE_SCHEMA_KEEP_LAST, - optional: true, - }, - "keep-hourly": { - schema: PRUNE_SCHEMA_KEEP_HOURLY, - optional: true, - }, - "keep-daily": { - schema: PRUNE_SCHEMA_KEEP_DAILY, - optional: true, - }, - "keep-weekly": { - schema: PRUNE_SCHEMA_KEEP_WEEKLY, - optional: true, - }, - "keep-monthly": { 
- schema: PRUNE_SCHEMA_KEEP_MONTHLY, - optional: true, - }, - "keep-yearly": { - schema: PRUNE_SCHEMA_KEEP_YEARLY, - optional: true, - }, - } -)] -#[derive(Serialize, Deserialize, Default)] -#[serde(rename_all = "kebab-case")] -/// Common pruning options -pub struct PruneOptions { - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_last: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_hourly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_daily: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_weekly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_monthly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_yearly: Option, -} - #[api] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -705,6 +659,28 @@ impl BackupNamespace { path } } + + /// Check whether this namespace contains another namespace. + /// + /// If so, the depth is returned. 
+ /// + /// Example: + /// ``` + /// # use pbs_api_types::BackupNamespace; + /// let main: BackupNamespace = "a/b".parse().unwrap(); + /// let sub: BackupNamespace = "a/b/c/d".parse().unwrap(); + /// let other: BackupNamespace = "x/y".parse().unwrap(); + /// assert_eq!(main.contains(&main), Some(0)); + /// assert_eq!(main.contains(&sub), Some(2)); + /// assert_eq!(sub.contains(&main), None); + /// assert_eq!(main.contains(&other), None); + /// ``` + pub fn contains(&self, other: &BackupNamespace) -> Option { + other + .inner + .strip_prefix(&self.inner[..]) + .map(|suffix| suffix.len()) + } } impl fmt::Display for BackupNamespace { diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index c65a6085..45a2c4f2 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -535,3 +535,185 @@ pub struct SyncJobStatus { #[serde(flatten)] pub status: JobScheduleStatus, } + +/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API +/// call to prune a specific group, where `max-depth` makes no sense. 
+#[api( + properties: { + "keep-last": { + schema: crate::PRUNE_SCHEMA_KEEP_LAST, + optional: true, + }, + "keep-hourly": { + schema: crate::PRUNE_SCHEMA_KEEP_HOURLY, + optional: true, + }, + "keep-daily": { + schema: crate::PRUNE_SCHEMA_KEEP_DAILY, + optional: true, + }, + "keep-weekly": { + schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY, + optional: true, + }, + "keep-monthly": { + schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY, + optional: true, + }, + "keep-yearly": { + schema: crate::PRUNE_SCHEMA_KEEP_YEARLY, + optional: true, + }, + } +)] +#[derive(Serialize, Deserialize, Default, Updater)] +#[serde(rename_all = "kebab-case")] +/// Common pruning options +pub struct KeepOptions { + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_last: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_hourly: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_daily: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_weekly: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_monthly: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub keep_yearly: Option, +} + +impl KeepOptions { + pub fn keeps_something(&self) -> bool { + self.keep_last.unwrap_or(0) + + self.keep_hourly.unwrap_or(0) + + self.keep_daily.unwrap_or(0) + + self.keep_monthly.unwrap_or(0) + + self.keep_yearly.unwrap_or(0) + > 0 + } +} + +#[api( + properties: { + keep: { + type: KeepOptions, + }, + ns: { + type: BackupNamespace, + optional: true, + }, + "max-depth": { + schema: NS_MAX_DEPTH_REDUCED_SCHEMA, + optional: true, + }, + } +)] +#[derive(Serialize, Deserialize, Default, Updater)] +#[serde(rename_all = "kebab-case")] +/// Common pruning options +pub struct PruneJobOptions { + #[serde(flatten)] + pub keep: KeepOptions, + + /// The (optional) recursion depth + #[serde(skip_serializing_if = "Option::is_none")] + pub max_depth: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub ns: Option, +} + +impl 
PruneJobOptions { + pub fn keeps_something(&self) -> bool { + self.keep.keeps_something() + } + + pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { + match &self.ns { + Some(ns) => ns.acl_path(store), + None => vec!["datastore", store], + } + } +} + +#[api( + properties: { + disable: { + type: Boolean, + optional: true, + default: false, + }, + id: { + schema: JOB_ID_SCHEMA, + }, + store: { + schema: DATASTORE_SCHEMA, + }, + schedule: { + schema: PRUNE_SCHEDULE_SCHEMA, + optional: true, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + options: { + type: PruneJobOptions, + }, + }, +)] +#[derive(Deserialize, Serialize, Updater)] +#[serde(rename_all = "kebab-case")] +/// Prune configuration. +pub struct PruneJobConfig { + /// unique ID to address this job + #[updater(skip)] + pub id: String, + + pub store: String, + + /// Disable this job. + #[serde(default, skip_serializing_if = "is_false")] + #[updater(serde(skip_serializing_if = "Option::is_none"))] + pub disable: bool, + + pub schedule: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, + + #[serde(flatten)] + pub options: PruneJobOptions, +} + +impl PruneJobConfig { + pub fn acl_path(&self) -> Vec<&str> { + self.options.acl_path(&self.store) + } +} + +fn is_false(b: &bool) -> bool { + !b +} + +#[api( + properties: { + config: { + type: PruneJobConfig, + }, + status: { + type: JobScheduleStatus, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Status of prune job +pub struct PruneJobStatus { + #[serde(flatten)] + pub config: PruneJobConfig, + #[serde(flatten)] + pub status: JobScheduleStatus, +} From 705ee9c93f650f232e1731d8e1fadc5337f6a4ef Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 19 May 2022 11:02:01 +0200 Subject: [PATCH 160/299] add prune jobs api Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git 
a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 9331a8a5..a462c2ec 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -264,14 +264,19 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore pub struct DataStoreConfig { #[updater(skip)] pub name: String, + #[updater(skip)] pub path: String, + #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub gc_schedule: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub prune_schedule: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub keep_last: Option, #[serde(skip_serializing_if = "Option::is_none")] @@ -284,18 +289,23 @@ pub struct DataStoreConfig { pub keep_monthly: Option, #[serde(skip_serializing_if = "Option::is_none")] pub keep_yearly: Option, + /// If enabled, all backups will be verified right after completion. #[serde(skip_serializing_if = "Option::is_none")] pub verify_new: Option, + /// Send job email notification to this user #[serde(skip_serializing_if = "Option::is_none")] pub notify_user: Option, + /// Send notification only for job errors #[serde(skip_serializing_if = "Option::is_none")] pub notify: Option, + /// Datastore tuning options #[serde(skip_serializing_if = "Option::is_none")] pub tuning: Option, + /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " #[serde(skip_serializing_if = "Option::is_none")] pub maintenance_mode: Option, From 288074a018c3a18bb1d5a9d26ef427a195922706 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 24 May 2022 12:54:42 +0200 Subject: [PATCH 161/299] manager: hidden command to move datastore prune opts into jobs Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 46 ++++------------------------------ 1 file changed, 5 insertions(+), 41 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs 
index a462c2ec..2bb47bbc 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -218,29 +218,8 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore optional: true, schema: PRUNE_SCHEDULE_SCHEMA, }, - "keep-last": { - optional: true, - schema: PRUNE_SCHEMA_KEEP_LAST, - }, - "keep-hourly": { - optional: true, - schema: PRUNE_SCHEMA_KEEP_HOURLY, - }, - "keep-daily": { - optional: true, - schema: PRUNE_SCHEMA_KEEP_DAILY, - }, - "keep-weekly": { - optional: true, - schema: PRUNE_SCHEMA_KEEP_WEEKLY, - }, - "keep-monthly": { - optional: true, - schema: PRUNE_SCHEMA_KEEP_MONTHLY, - }, - "keep-yearly": { - optional: true, - schema: PRUNE_SCHEMA_KEEP_YEARLY, + keep: { + type: crate::KeepOptions, }, "verify-new": { description: "If enabled, all new backups will be verified right after completion.", @@ -277,18 +256,8 @@ pub struct DataStoreConfig { #[serde(skip_serializing_if = "Option::is_none")] pub prune_schedule: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_last: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_hourly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_daily: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_weekly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_monthly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_yearly: Option, + #[serde(flatten)] + pub keep: crate::KeepOptions, /// If enabled, all backups will be verified right after completion. 
#[serde(skip_serializing_if = "Option::is_none")] @@ -319,12 +288,7 @@ impl DataStoreConfig { comment: None, gc_schedule: None, prune_schedule: None, - keep_last: None, - keep_hourly: None, - keep_daily: None, - keep_weekly: None, - keep_monthly: None, - keep_yearly: None, + keep: Default::default(), verify_new: None, notify_user: None, notify: None, From 1d5b46c21ec6e368ece9d2525f006ec4672b02c1 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 1 Jun 2022 14:30:24 +0200 Subject: [PATCH 162/299] api types: prune keep options: also check weekly in keeps_something Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 45a2c4f2..68ef89eb 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -589,6 +589,7 @@ impl KeepOptions { self.keep_last.unwrap_or(0) + self.keep_hourly.unwrap_or(0) + self.keep_daily.unwrap_or(0) + + self.keep_weekly.unwrap_or(0) + self.keep_monthly.unwrap_or(0) + self.keep_yearly.unwrap_or(0) > 0 From 551cd92fa89723ec119f089ca9f1e489ab78a664 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Thu, 2 Jun 2022 15:57:07 +0200 Subject: [PATCH 163/299] api types: clippy lints Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- pbs-api-types/src/jobs.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 2bb47bbc..bb20e149 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -944,7 +944,7 @@ impl BackupDir { where T: Into, { - let time = proxmox_time::parse_rfc3339(&backup_time_string)?; + let time = proxmox_time::parse_rfc3339(backup_time_string)?; let group = BackupGroup::new(ty, id.into()); Ok(Self { group, time }) } diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 68ef89eb..925a1829 100644 --- a/pbs-api-types/src/jobs.rs +++ 
b/pbs-api-types/src/jobs.rs @@ -390,7 +390,7 @@ impl std::str::FromStr for GroupFilter { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - match s.split_once(":") { + match s.split_once(':') { Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())), Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)), Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)), From 2ec6f86f633282a249e55779c401ef2027208d95 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Tue, 7 Jun 2022 09:22:45 +0200 Subject: [PATCH 164/299] tree wide: typo fixes through codespell Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- pbs-api-types/src/human_byte.rs | 2 +- pbs-api-types/src/traffic_control.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index bb20e149..86201b8e 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -470,7 +470,7 @@ impl BackupNamespace { Ok(child) } - /// Pop one level off the namespace hierachy + /// Pop one level off the namespace hierarchy pub fn pop(&mut self) -> Option { let dropped = self.inner.pop(); if let Some(ref dropped) = dropped { diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index 9e1a1893..532632c8 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -69,7 +69,7 @@ impl SizeUnit { } } -/// Returns the string repesentation +/// Returns the string representation impl std::fmt::Display for SizeUnit { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index d29f18b4..3ed579cf 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -113,7 +113,7 @@ pub struct TrafficControlRule { 
#[serde(flatten)] pub limit: RateLimitConfig, // fixme: expose this? - // /// Bandwidth is shared accross all connections + // /// Bandwidth is shared across all connections // #[serde(skip_serializing_if="Option::is_none")] // pub shared: Option, /// Enable the rule at specific times From 57fa204064bf2f1cb7e010d676da4f5694f470d6 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Fri, 10 Jun 2022 13:17:51 +0200 Subject: [PATCH 165/299] pbs-api-types: add metrics api types InfluxDbUdp and InfluxDbHttp for now introduces schemas for host:port and https urls Signed-off-by: Dominik Csapak Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/lib.rs | 17 ++++ pbs-api-types/src/metrics.rs | 148 +++++++++++++++++++++++++++++++++++ 2 files changed, 165 insertions(+) create mode 100644 pbs-api-types/src/metrics.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index d9c8cee1..70c9ec45 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -120,6 +120,9 @@ pub use traffic_control::*; mod zfs; pub use zfs::*; +mod metrics; +pub use metrics::*; + #[rustfmt::skip] #[macro_use] mod local_macros { @@ -131,6 +134,7 @@ mod local_macros { macro_rules! DNS_ALIAS_NAME { () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")")) } + macro_rules! PORT_REGEX_STR { () => (r"(?:[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])") } } const_regex! { @@ -144,6 +148,8 @@ const_regex! 
{ pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$"); pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$"); pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$"); + pub HOST_PORT_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), "):", PORT_REGEX_STR!() ,"$"); + pub HTTP_URL_REGEX = concat!(r"^https?://(?:(?:(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), ")(?::", PORT_REGEX_STR!() ,")?)|", IPV6RE!(),")(?:/[^\x00-\x1F\x7F]*)?$"); pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ? @@ -201,6 +207,8 @@ pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX); pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX); +pub const HOST_PORT_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOST_PORT_REGEX); +pub const HTTP_URL_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HTTP_URL_REGEX); pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); @@ -244,6 +252,15 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr .format(&DNS_NAME_OR_IP_FORMAT) .schema(); +pub const HOST_PORT_SCHEMA: Schema = + StringSchema::new("host:port combination (Host can be DNS name or IP address).") + .format(&HOST_PORT_FORMAT) + .schema(); + +pub const HTTP_URL_SCHEMA: Schema = StringSchema::new("HTTP(s) url with optional port.") + .format(&HTTP_URL_FORMAT) + .schema(); + pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") .format(&HOSTNAME_FORMAT) .schema(); diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs new file mode 100644 index 00000000..f5cfe95d --- /dev/null +++ b/pbs-api-types/src/metrics.rs @@ -0,0 +1,148 @@ +use serde::{Deserialize, Serialize}; + +use crate::{ + HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, +}; +use 
proxmox_schema::{api, Schema, StringSchema, Updater}; + +pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .schema(); + +pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .default("proxmox") + .schema(); + +pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.") + .format(&PROXMOX_SAFE_ID_FORMAT) + .min_length(3) + .max_length(32) + .default("proxmox") + .schema(); + +fn return_true() -> bool { + true +} + +fn is_true(b: &bool) -> bool { + *b +} + +#[api( + properties: { + name: { + schema: METRIC_SERVER_ID_SCHEMA, + }, + enable: { + type: bool, + optional: true, + default: true, + }, + host: { + schema: HOST_PORT_SCHEMA, + }, + mtu: { + type: u16, + optional: true, + default: 1500, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + }, +)] +#[derive(Serialize, Deserialize, Updater)] +#[serde(rename_all = "kebab-case")] +/// InfluxDB Server (UDP) +pub struct InfluxDbUdp { + #[updater(skip)] + pub name: String, + #[serde(default = "return_true", skip_serializing_if = "is_true")] + #[updater(serde(skip_serializing_if = "Option::is_none"))] + /// Enables or disables the metrics server + pub enable: bool, + /// the host + port + pub host: String, + #[serde(skip_serializing_if = "Option::is_none")] + /// The MTU + pub mtu: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, +} + +#[api( + properties: { + name: { + schema: METRIC_SERVER_ID_SCHEMA, + }, + enable: { + type: bool, + optional: true, + default: true, + }, + url: { + schema: HTTP_URL_SCHEMA, + }, + token: { + type: String, + optional: true, + }, + bucket: { + schema: INFLUXDB_BUCKET_SCHEMA, + optional: true, + }, + organization: { + schema: INFLUXDB_ORGANIZATION_SCHEMA, + optional: true, + }, + 
"max-body-size": { + type: usize, + optional: true, + default: 25_000_000, + }, + "verify-tls": { + type: bool, + optional: true, + default: true, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + }, +)] +#[derive(Serialize, Deserialize, Updater)] +#[serde(rename_all = "kebab-case")] +/// InfluxDB Server (HTTP(s)) +pub struct InfluxDbHttp { + #[updater(skip)] + pub name: String, + #[serde(default = "return_true", skip_serializing_if = "is_true")] + #[updater(serde(skip_serializing_if = "Option::is_none"))] + /// Enables or disables the metrics server + pub enable: bool, + /// The base url of the influxdb server + pub url: String, + /// The Optional Token + #[serde(skip_serializing_if = "Option::is_none")] + /// The (optional) API token + pub token: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub bucket: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub organization: Option, + #[serde(skip_serializing_if = "Option::is_none")] + /// The (optional) maximum body size + pub max_body_size: Option, + #[serde(skip_serializing_if = "Option::is_none")] + /// If true, the certificate will be validated. + pub verify_tls: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, +} From 214317df9316820418c36ec5b0313e55a8939f27 Mon Sep 17 00:00:00 2001 From: Stefan Sterz Date: Wed, 29 Jun 2022 10:55:38 +0200 Subject: [PATCH 166/299] api-types: doc: add crate to `Display` trait in comments when creating the documentation (e.g. `cargo doc --open`), it would warn that `Display` is not in scope. 
Signed-off-by: Stefan Sterz Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 86201b8e..70b639ea 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -579,7 +579,7 @@ impl BackupNamespace { Ok(()) } - /// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every + /// Return an adapter which [`fmt::Display`]s as a path with `"ns/"` prefixes in front of every /// component. pub fn display_as_path(&self) -> BackupNamespacePath { BackupNamespacePath(self) @@ -691,7 +691,7 @@ impl ApiType for BackupNamespace { /// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`]. /// -/// This implements [`Display`] such that it includes the `ns/` subdirectory prefix in front of +/// This implements [`fmt::Display`] such that it includes the `ns/` subdirectory prefix in front of /// every component. pub struct BackupNamespacePath<'a>(&'a BackupNamespace); From 1647fc93ff5d5fecb273fe3b45f312f3cbaaa849 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 14 Jul 2022 11:09:34 +0200 Subject: [PATCH 167/299] api-types: make BackupType::iter an actual iterator Otherwise we have to use BackupType::iter().iter() whenever we're not using a `for _ in iter()` construct. 
Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 70b639ea..170d248f 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -741,8 +741,11 @@ impl BackupType { } } - pub const fn iter() -> &'static [BackupType] { - &[Self::Vm, Self::Ct, Self::Host] + #[inline] + pub fn iter() -> impl Iterator + Send + Sync + Unpin + 'static { + [BackupType::Vm, BackupType::Ct, BackupType::Host] + .iter() + .copied() } } From 74092debc5d51af3891c87e30915aa39fd7b23ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 26 Jul 2022 13:36:14 +0200 Subject: [PATCH 168/299] more clippy fixes and annotations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit the remaining ones are: - type complexity - fns with many arguments - new() without default() - false positives for redundant closures (where closure returns a static value) - expected vs actual length check without match/cmp Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 170d248f..fbd01fc4 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1190,7 +1190,7 @@ pub struct TypeCounts { }, }, )] -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone, Default, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Garbage collection status. 
pub struct GarbageCollectionStatus { @@ -1217,24 +1217,6 @@ pub struct GarbageCollectionStatus { pub still_bad: usize, } -impl Default for GarbageCollectionStatus { - fn default() -> Self { - GarbageCollectionStatus { - upid: None, - index_file_count: 0, - index_data_bytes: 0, - disk_bytes: 0, - disk_chunks: 0, - removed_bytes: 0, - removed_chunks: 0, - pending_bytes: 0, - pending_chunks: 0, - removed_bad: 0, - still_bad: 0, - } - } -} - #[api( properties: { "gc-status": { From 3d670c9e4e5a6db81e35804d2da5d9683eff83a4 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 27 Jul 2022 13:29:32 +0200 Subject: [PATCH 169/299] api-types: clippy fixes Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 6 +++--- pbs-api-types/src/human_byte.rs | 2 +- pbs-api-types/src/jobs.rs | 2 +- pbs-api-types/src/lib.rs | 2 +- pbs-api-types/src/maintenance.rs | 2 +- pbs-api-types/src/network.rs | 8 ++++---- pbs-api-types/src/tape/media_location.rs | 2 +- pbs-api-types/src/tape/media_status.rs | 2 +- pbs-api-types/src/userid.rs | 6 +++--- pbs-api-types/src/zfs.rs | 4 ++-- 10 files changed, 18 insertions(+), 18 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index fbd01fc4..0af11b33 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -158,7 +158,7 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .schema(); #[api] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// The order to sort chunks by pub enum ChunkOrder { @@ -357,7 +357,7 @@ pub struct BackupContent { } #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Result of a verify operation. 
pub enum VerifyState { @@ -1397,7 +1397,7 @@ pub fn parse_ns_and_snapshot(input: &str) -> Result<(BackupNamespace, BackupDir) match input.rmatch_indices('/').nth(2) { Some((idx, _)) => { let ns = BackupNamespace::from_path(&input[..idx])?; - let dir: BackupDir = (&input[idx + 1..]).parse()?; + let dir: BackupDir = input[(idx + 1)..].parse()?; Ok((ns, dir)) } None => Ok((BackupNamespace::root(), input.parse()?)), diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index 532632c8..37a74f77 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -3,7 +3,7 @@ use anyhow::{bail, Error}; use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType}; /// Size units for byte sizes -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum SizeUnit { Byte, // SI (base 10) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 925a1829..e4be03f0 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -102,7 +102,7 @@ pub struct JobScheduleStatus { } #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// When do we send notifications pub enum Notify { diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 70c9ec45..18cde45e 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -401,7 +401,7 @@ pub struct APTUpdateInfo { } #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Node Power command type. 
pub enum NodePowerCommand { diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 5bbba043..7eb33963 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -33,7 +33,7 @@ pub enum Operation { } #[api] -#[derive(Deserialize, Serialize, PartialEq)] +#[derive(Deserialize, Serialize, PartialEq, Eq)] #[serde(rename_all = "kebab-case")] /// Maintenance type. pub enum MaintenanceType { diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index dda0db1b..4b0671c5 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -42,7 +42,7 @@ pub const CIDR_SCHEMA: Schema = .schema(); #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Interface configuration method pub enum NetworkConfigMethod { @@ -57,7 +57,7 @@ pub enum NetworkConfigMethod { } #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] #[allow(non_camel_case_types)] #[repr(u8)] @@ -81,7 +81,7 @@ pub enum LinuxBondMode { } #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] #[allow(non_camel_case_types)] #[repr(u8)] @@ -98,7 +98,7 @@ pub enum BondXmitHashPolicy { } #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Network interface type pub enum NetworkInterfaceType { diff --git a/pbs-api-types/src/tape/media_location.rs b/pbs-api-types/src/tape/media_location.rs index b81ea9a8..608460b5 100644 --- a/pbs-api-types/src/tape/media_location.rs +++ b/pbs-api-types/src/tape/media_location.rs @@ -10,7 
+10,7 @@ pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.") .max_length(32) .schema(); -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone)] /// Media location pub enum MediaLocation { /// Ready for use (inside tape library) diff --git a/pbs-api-types/src/tape/media_status.rs b/pbs-api-types/src/tape/media_status.rs index 9815b91f..fdb4e6a0 100644 --- a/pbs-api-types/src/tape/media_status.rs +++ b/pbs-api-types/src/tape/media_status.rs @@ -4,7 +4,7 @@ use proxmox_schema::api; #[api()] /// Media status -#[derive(Debug, PartialEq, Copy, Clone, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Media Status pub enum MediaStatus { diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index ecbae9c2..996e96e5 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -316,7 +316,7 @@ impl PartialEq for RealmRef { impl PartialEq for &RealmRef { fn eq(&self, rhs: &Realm) -> bool { - (*self).0 == rhs.0 + self.0 == rhs.0 } } @@ -453,7 +453,7 @@ impl Userid { /// Get the "root@pam" user id. pub fn root_userid() -> &'static Self { - &*ROOT_USERID + &ROOT_USERID } } @@ -586,7 +586,7 @@ impl Authid { /// Get the "root@pam" auth id. pub fn root_auth_id() -> &'static Self { - &*ROOT_AUTHID + &ROOT_AUTHID } } diff --git a/pbs-api-types/src/zfs.rs b/pbs-api-types/src/zfs.rs index b62af6cb..57fa5cf4 100644 --- a/pbs-api-types/src/zfs.rs +++ b/pbs-api-types/src/zfs.rs @@ -17,7 +17,7 @@ pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name") .schema(); #[api(default: "On")] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// The ZFS compression algorithm to use. 
pub enum ZfsCompressionType { @@ -38,7 +38,7 @@ pub enum ZfsCompressionType { } #[api()] -#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// The ZFS RAID level to use. pub enum ZfsRaidLevel { From 49e553935fd0b4adca56692247714f5e1edb0936 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Wed, 13 Jul 2022 11:43:12 +0200 Subject: [PATCH 170/299] pbs-api-types: add FileRestoreFormat type intended for passing the format to the file-restore client/daemon Signed-off-by: Dominik Csapak Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/file_restore.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pbs-api-types/src/file_restore.rs b/pbs-api-types/src/file_restore.rs index 5748f3a7..90657d65 100644 --- a/pbs-api-types/src/file_restore.rs +++ b/pbs-api-types/src/file_restore.rs @@ -13,3 +13,18 @@ pub struct RestoreDaemonStatus { /// not set, as then the status call will have reset the timer before returning the value pub timeout: i64, } + +#[api] +#[derive(Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "kebab-case")] +/// The desired format of the result. +pub enum FileRestoreFormat { + /// Plain file (only works for single files) + Plain, + /// PXAR archive + Pxar, + /// ZIP archive + Zip, + /// TAR archive + Tar, +} From dbd5906402bbb8eefc170c3924d5b976038f41c0 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 6 Oct 2022 11:08:12 +0200 Subject: [PATCH 171/299] fix #4274: implement prune notifications we converted the prune settings of datastores to prune-jobs, but did not actually implement the notifications for them, even though we had the notification options in the gui (they did not work). 
implement the basic ok/error notification for prune jobs Signed-off-by: Dominik Csapak --- pbs-api-types/src/jobs.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e4be03f0..7f029af7 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -128,6 +128,10 @@ pub enum Notify { type: Notify, optional: true, }, + prune: { + type: Notify, + optional: true, + }, }, )] #[derive(Debug, Serialize, Deserialize)] @@ -139,6 +143,8 @@ pub struct DatastoreNotify { pub verify: Option, /// Sync job setting pub sync: Option, + /// Prune job setting + pub prune: Option, } pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = From 4a13373c4ba7ee07bd35018b4a5e60e211c2fa1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 11 Oct 2022 09:26:32 +0200 Subject: [PATCH 172/299] clippy fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and one additional API fn "allow many parameters" addition. Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/human_byte.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index 37a74f77..f9ac6d53 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -246,7 +246,7 @@ fn test_human_byte_parser() -> Result<(), Error> { } let new = h.to_string(); - if &new != as_str { + if new != *as_str { bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str); } Ok(()) From e0fb53e41dc9ffd63c4760c725c23cb5fa735da3 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 20 Oct 2022 09:40:56 +0200 Subject: [PATCH 173/299] datastore: implement sync-level tuning for datastores currently, we don't (f)sync on chunk insertion (or at any point after that), which can lead to broken chunks in case of e.g. an unexpected powerloss. 
To fix that, offer a tuning option for datastores that controls the level of syncs it does: * None (default): same as current state, no (f)syncs done at any point * Filesystem: at the end of a backup, the datastore issues a syncfs(2) to the filesystem of the datastore * File: issues an fsync on each chunk as they get inserted (using our 'replace_file' helper) and a fsync on the directory handle a small benchmark showed the following (times in mm:ss): setup: virtual pbs, 4 cores, 8GiB memory, ext4 on spinner size none filesystem file 2GiB (fits in ram) 00:13 0:41 01:00 33GiB 05:21 05:31 13:45 so if the backup fits in memory, there is a large difference between all of the modes (expected), but as soon as it exceeds the memory size, the difference between not syncing and syncing the fs at the end becomes much smaller. i also tested on an nvme, but there the syncs basically made no difference Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 37 ++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 0af11b33..15ea80cd 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -168,6 +168,42 @@ pub enum ChunkOrder { Inode, } +#[api] +#[derive(PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// The level of syncing that is done when writing into a datastore. +pub enum DatastoreFSyncLevel { + /// No special fsync or syncfs calls are triggered. The system default dirty write back + /// mechanism ensures that data gets is flushed eventually via the `dirty_writeback_centisecs` + /// and `dirty_expire_centisecs` kernel sysctls, defaulting to ~ 30s. + /// + /// This mode provides generally the best performance, as all write back can happen async, + /// which reduces IO pressure. + /// But it may cause losing data on powerloss or system crash without any uninterruptible power + /// supply. 
+ None, + /// Triggers a fsync after writing any chunk on the datastore. While this can slow down + /// backups significantly, depending on the underlying file system and storage used, it + /// will ensure fine-grained consistency. Depending on the exact setup, there might be no + /// benefits over the file system level sync, so if the setup allows it, you should prefer + /// that one. Despite the possible negative impact in performance, it's the most consistent + /// mode. + File, + /// Trigger a filesystem wide sync after all backup data got written but before finishing the + /// task. This allows that every finished backup is fully written back to storage + /// while reducing the impact on many file systems in contrast to the file level sync. + /// Depending on the setup, it might have a negative impact on unrelated write operations + /// of the underlying filesystem, but it is generally a good compromise between performance + /// and consitency. + Filesystem, +} + +impl Default for DatastoreFSyncLevel { + fn default() -> Self { + DatastoreFSyncLevel::None + } +} + #[api( properties: { "chunk-order": { @@ -182,6 +218,7 @@ pub enum ChunkOrder { pub struct DatastoreTuning { /// Iterate chunks in this order pub chunk_order: Option, + pub sync_level: Option, } pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore tuning options") From 495da87f80a62e588889bf4243e3b2736931cf60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Mon, 24 Oct 2022 12:10:19 +0200 Subject: [PATCH 174/299] clippy fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit the dropped .into() is guarded by the bumped build-dependency on proxmox-sys 0.4.1, the missing Eq is a new clippy lint. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 15ea80cd..865a7b55 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -169,7 +169,7 @@ pub enum ChunkOrder { } #[api] -#[derive(PartialEq, Serialize, Deserialize)] +#[derive(PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// The level of syncing that is done when writing into a datastore. pub enum DatastoreFSyncLevel { From 7a98c5d50ca8828f983e425e489ec0f05396d190 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Fri, 28 Oct 2022 09:34:47 +0200 Subject: [PATCH 175/299] datastore: improve sync level code a bit fixups for DatastoreFSyncLevel: * use derive for Default * add some more derives (Clone, Copy) chunk store: * drop to_owned for chunk_dir_path Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 865a7b55..4c9eda2f 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -169,7 +169,7 @@ pub enum ChunkOrder { } #[api] -#[derive(PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// The level of syncing that is done when writing into a datastore. pub enum DatastoreFSyncLevel { @@ -181,6 +181,7 @@ pub enum DatastoreFSyncLevel { /// which reduces IO pressure. /// But it may cause losing data on powerloss or system crash without any uninterruptible power /// supply. + #[default] None, /// Triggers a fsync after writing any chunk on the datastore. 
While this can slow down /// backups significantly, depending on the underlying file system and storage used, it @@ -198,12 +199,6 @@ pub enum DatastoreFSyncLevel { Filesystem, } -impl Default for DatastoreFSyncLevel { - fn default() -> Self { - DatastoreFSyncLevel::None - } -} - #[api( properties: { "chunk-order": { From b5708459d3c55a8b6cb192933a90933f68af35df Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 15 Nov 2022 10:16:54 +0100 Subject: [PATCH 176/299] api-types: derive Ord for BackupDir Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 4c9eda2f..95aa8830 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -930,7 +930,7 @@ impl std::str::FromStr for BackupGroup { /// Uniquely identify a Backup (relative to data store) /// /// We also call this a backup snaphost. -#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] pub struct BackupDir { /// Backup group. From 017a0652cd8cf210550e8184a143ed0868cc1518 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Fri, 4 Nov 2022 10:49:34 +0100 Subject: [PATCH 177/299] datastore: make 'filesystem' the default sync-level rationale is that it makes the backup much safer than 'none', but does not incur a big of a performance hit as 'file'. 
here some benchmark: data to be backed up: ~14GiB semi-random test images between 12kiB and 4GiB that results in ~11GiB chunks (more than ram available on the target) PBS setup: virtualized (on an idle machine), PBS itself was also idle 8 cores (kvm64 on Intel 12700k) and 8 GiB memory all virtual disks are on LVM with discard and iothread on the HDD is a 4TB Seagate ST4000DM000 drive, and the NVME is a 2TB Crucial CT2000P5PSSD8 i tested each disk with ext4/xfs/zfs (default created with the gui) with 5 runs each, inbetween the caches are flushed and the filesystem synced i removed the biggest and smallest result and from the remaining 3 results built the average (percentage is relative to the 'none' result) result: test none filesystem file hdd - ext4 125.67s 140.39s (+11.71%) 358.10s (+184.95%) hdd - xfs 92.18s 102.64s (+11.35%) 351.58s (+281.41%) hdd - zfs 94.82s 104.00s (+9.68%) 309.13s (+226.02%) nvme - ext4 60.44s 60.26s (-0.30%) 60.47s (+0.05%) nvme - xfs 60.11s 60.47s (+0.60%) 60.49s (+0.63%) nvme - zfs 60.83s 60.85s (+0.03%) 60.80s (-0.05%) So all in all, it does not seem to make a difference for nvme drives, for hdds 'filesystem' increases backup time by ~10%, while for 'file' it largely depends on the filesystem, but always in the range of factor ~3 - ~4 Note that this does not take into account parallel actions, such as gc, verify or other backups. Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 95aa8830..dde385c3 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -181,7 +181,6 @@ pub enum DatastoreFSyncLevel { /// which reduces IO pressure. /// But it may cause losing data on powerloss or system crash without any uninterruptible power /// supply. - #[default] None, /// Triggers a fsync after writing any chunk on the datastore. 
While this can slow down /// backups significantly, depending on the underlying file system and storage used, it @@ -196,6 +195,7 @@ pub enum DatastoreFSyncLevel { /// Depending on the setup, it might have a negative impact on unrelated write operations /// of the underlying filesystem, but it is generally a good compromise between performance /// and consitency. + #[default] Filesystem, } From 5196e0bbeea002e91c80973253651ca7559e0662 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 25 Nov 2022 11:18:40 +0100 Subject: [PATCH 178/299] api-types: make Operation Eq Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/maintenance.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 7eb33963..692be021 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -17,7 +17,7 @@ pub const MAINTENANCE_MESSAGE_SCHEMA: Schema = .max_length(64) .schema(); -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] /// Operation requirements, used when checking for maintenance mode. pub enum Operation { /// for any read operation like backup restore or RRD metric collection From cf10742842aaea24705e8d9b61d8c585bc554ff2 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 25 Nov 2022 11:18:56 +0100 Subject: [PATCH 179/299] api-types: derive Display and FromStr for MaintenanceType Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/maintenance.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 692be021..e46ba90a 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -47,6 +47,8 @@ pub enum MaintenanceType { /// Neither read nor write operations are allowed on the datastore. 
Offline, } +serde_plain::derive_display_from_serialize!(MaintenanceType); +serde_plain::derive_fromstr_from_deserialize!(MaintenanceType); #[api( properties: { From 854fb5c08f3bd06d3b0dec28277b5065f4e61e50 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 25 Nov 2022 11:19:11 +0100 Subject: [PATCH 180/299] api-types: add MaintenanceType::Delete Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/maintenance.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index e46ba90a..1b03ca94 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -46,6 +46,8 @@ pub enum MaintenanceType { ReadOnly, /// Neither read nor write operations are allowed on the datastore. Offline, + /// The datastore is being deleted. + Delete, } serde_plain::derive_display_from_serialize!(MaintenanceType); serde_plain::derive_fromstr_from_deserialize!(MaintenanceType); @@ -76,6 +78,10 @@ pub struct MaintenanceMode { impl MaintenanceMode { pub fn check(&self, operation: Option) -> Result<(), Error> { + if self.ty == MaintenanceType::Delete { + bail!("datastore is being deleted"); + } + let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or("")) .decode_utf8() .unwrap_or(Cow::Borrowed("")); From d70ae30a4358aaa82e07948c515c2aa988f4e282 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Mon, 28 Nov 2022 14:26:40 +0100 Subject: [PATCH 181/299] use derive 'Default' for ChunkOrder instead of hardcoding the default deep inside the code. 
This makes it much easier to see what is the actual default the first instance of ChunkOrder::None was only for the test case, where the ordering does not matter Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index dde385c3..d75ead90 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -158,13 +158,14 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .schema(); #[api] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// The order to sort chunks by pub enum ChunkOrder { /// Iterate chunks in the index order None, /// Iterate chunks in inode order + #[default] Inode, } From 378b763408b7de91ef04b0b85f502b7f228d2f06 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 6 Dec 2022 11:19:41 +0100 Subject: [PATCH 182/299] tree-wide: bump edition to 2021 Signed-off-by: Wolfgang Bumiller --- pbs-api-types/Cargo.toml | 2 +- pbs-api-types/src/tape/drive.rs | 2 -- pbs-api-types/src/userid.rs | 1 - 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 8f897d5d..61d37d77 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -2,7 +2,7 @@ name = "pbs-api-types" version = "0.1.0" authors = ["Proxmox Support Team "] -edition = "2018" +edition = "2021" description = "general API type helpers for PBS" [dependencies] diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index c8cb077c..d841505a 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -1,6 +1,4 @@ //!
Types for tape drive API -use std::convert::TryFrom; - use anyhow::{bail, Error}; use serde::{Deserialize, Serialize}; diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs index 996e96e5..052e66ed 100644 --- a/pbs-api-types/src/userid.rs +++ b/pbs-api-types/src/userid.rs @@ -22,7 +22,6 @@ //! compared with each other, as in those cases the comparison has meaning. use std::borrow::Borrow; -use std::convert::TryFrom; use std::fmt; use anyhow::{bail, format_err, Error}; From 6623ebdf2a5cc684a3d943d1dbf8f0bb43118fbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Wed, 7 Dec 2022 11:33:47 +0100 Subject: [PATCH 183/299] workspace: inherit metadata MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pbs-buildcfg is the only one that needs to inherit the version as well, since it stores it in the compiled crate. Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 61d37d77..ee64b6e1 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "pbs-api-types" version = "0.1.0" -authors = ["Proxmox Support Team "] -edition = "2021" +authors.workspace = true +edition.workspace = true description = "general API type helpers for PBS" [dependencies] From 580399b26cdc03535b5b9aacb9b74b06ff9aebbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 9 Dec 2022 11:37:02 +0100 Subject: [PATCH 184/299] switch proxmox dependencies to workspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit besides harmonizing versions, the only global change is that the tokio-io feature of pxar is now implied since its default anyway, instead of being spelled out. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index ee64b6e1..0b60e1cb 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,8 +14,8 @@ regex = "1.5.5" serde = { version = "1.0", features = ["derive"] } serde_plain = "1" -proxmox-lang = "1.0.0" -proxmox-schema = { version = "1.2.1", features = [ "api-macro" ] } -proxmox-serde = "0.1" -proxmox-time = "1.1.1" -proxmox-uuid = { version = "1.0.0", features = [ "serde" ] } +proxmox-lang.workspace=true +proxmox-schema = { workspace = true, features = [ "api-macro" ] } +proxmox-serde.workspace = true +proxmox-time.workspace = true +proxmox-uuid = { workspace = true, features = [ "serde" ] } From 8dbc2d73118a2001a6e0d2b971c957f419531bfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 9 Dec 2022 13:22:58 +0100 Subject: [PATCH 185/299] switch regular dependencies to workspace ones MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit where applicable. 
notable changes: - serde now uses 'derive' feature across the board - serde removed from pbs-tools (not used) - openssl bumped to 0.40 (and patched comment removed) - removed invalid zstd comment Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 0b60e1cb..94740c70 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -6,12 +6,12 @@ edition.workspace = true description = "general API type helpers for PBS" [dependencies] -anyhow = "1.0" -hex = "0.4.3" -lazy_static = "1.4" -percent-encoding = "2.1" -regex = "1.5.5" -serde = { version = "1.0", features = ["derive"] } +anyhow.workspace = true +hex.workspace = true +lazy_static.workspace = true +percent-encoding.workspace = true +regex.workspace = true +serde.workspace = true serde_plain = "1" proxmox-lang.workspace=true From 89052a009de7f177012f586cc406ff7c1302a846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 9 Dec 2022 13:52:03 +0100 Subject: [PATCH 186/299] switch remaining member dependencies to workspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit these are only used by a single member at the moment, but we can move them to the workspace to have a single location for version + base feature set specification. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 94740c70..4020fc20 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -12,7 +12,7 @@ lazy_static.workspace = true percent-encoding.workspace = true regex.workspace = true serde.workspace = true -serde_plain = "1" +serde_plain.workspace = true proxmox-lang.workspace=true proxmox-schema = { workspace = true, features = [ "api-macro" ] } From 915f6ab5d08b719f96c8de87404f66e9f0f18927 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 15 Dec 2022 17:34:13 +0100 Subject: [PATCH 187/299] derive Clone and PartialEq for some API types This is useful for react-lik GUI toolkits which need to do VDOM diffs. Signed-off-by: Dietmar Maurer --- pbs-api-types/src/datastore.rs | 10 +++++----- pbs-api-types/src/human_byte.rs | 2 +- pbs-api-types/src/jobs.rs | 19 +++++++++++++++---- pbs-api-types/src/lib.rs | 4 ++-- pbs-api-types/src/remote.rs | 4 ++-- pbs-api-types/src/tape/media.rs | 2 +- pbs-api-types/src/traffic_control.rs | 2 +- pbs-api-types/src/user.rs | 4 ++-- 8 files changed, 29 insertions(+), 18 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index d75ead90..72e8d1ee 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -270,7 +270,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore }, } )] -#[derive(Serialize, Deserialize, Updater)] +#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Datastore configuration properties. pub struct DataStoreConfig { @@ -354,7 +354,7 @@ impl DataStoreConfig { } }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Basic information about a datastore. 
pub struct DataStoreListItem { @@ -1141,7 +1141,7 @@ pub struct GroupListItem { } #[api()] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Basic information about a backup namespace. pub struct NamespaceListItem { @@ -1223,7 +1223,7 @@ pub struct TypeCounts { }, }, )] -#[derive(Clone, Default, Serialize, Deserialize)] +#[derive(Clone, Default, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "kebab-case")] /// Garbage collection status. pub struct GarbageCollectionStatus { @@ -1295,7 +1295,7 @@ pub struct DataStoreStatus { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Status of a Datastore pub struct DataStoreStatusListItem { diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs index f9ac6d53..189a645c 100644 --- a/pbs-api-types/src/human_byte.rs +++ b/pbs-api-types/src/human_byte.rs @@ -121,7 +121,7 @@ fn strip_unit(v: &str) -> (&str, SizeUnit) { } /// Byte size which can be displayed in a human friendly way -#[derive(Debug, Copy, Clone, UpdaterType)] +#[derive(Debug, Copy, Clone, UpdaterType, PartialEq)] pub struct HumanByte { /// The siginficant value, it does not includes any factor of the `unit` size: f64, diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 7f029af7..cf7618c4 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -87,7 +87,7 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( }, } )] -#[derive(Serialize, Deserialize, Default)] +#[derive(Serialize, Deserialize, Default, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Job Scheduling Status pub struct JobScheduleStatus { @@ -392,6 +392,17 @@ pub enum GroupFilter { Regex(Regex), } +impl PartialEq for GroupFilter { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::BackupType(a), Self::BackupType(b)) => a 
== b, + (Self::Group(a), Self::Group(b)) => a == b, + (Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(), + _ => false, + } + } +} + impl std::str::FromStr for GroupFilter { type Err = anyhow::Error; @@ -484,7 +495,7 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = }, } )] -#[derive(Serialize, Deserialize, Clone, Updater)] +#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] #[serde(rename_all = "kebab-case")] /// Sync Job pub struct SyncJobConfig { @@ -532,7 +543,7 @@ impl SyncJobConfig { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Status of Sync Job pub struct SyncJobStatus { @@ -572,7 +583,7 @@ pub struct SyncJobStatus { }, } )] -#[derive(Serialize, Deserialize, Default, Updater)] +#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Common pruning options pub struct KeepOptions { diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 18cde45e..5e043954 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -492,7 +492,7 @@ pub enum RRDTimeFrame { } #[api] -#[derive(Deserialize, Serialize, PartialEq, Eq)] +#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)] #[serde(rename_all = "lowercase")] /// type of the realm pub enum RealmType { @@ -518,7 +518,7 @@ pub enum RealmType { }, }, )] -#[derive(Deserialize, Serialize)] +#[derive(Deserialize, Serialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Basic Information about a realm pub struct BasicRealmInfo { diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs index 890e31c0..e7912ee0 100644 --- a/pbs-api-types/src/remote.rs +++ b/pbs-api-types/src/remote.rs @@ -46,7 +46,7 @@ pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.") }, }, )] -#[derive(Serialize, Deserialize, Updater)] +#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] #[serde(rename_all = 
"kebab-case")] /// Remote configuration properties. pub struct RemoteConfig { @@ -96,7 +96,7 @@ pub struct Remote { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Remote properties. pub struct RemoteWithoutPassword { diff --git a/pbs-api-types/src/tape/media.rs b/pbs-api-types/src/tape/media.rs index c2c25da0..6792cd3c 100644 --- a/pbs-api-types/src/tape/media.rs +++ b/pbs-api-types/src/tape/media.rs @@ -149,7 +149,7 @@ pub struct LabelUuidMap { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Media content list entry pub struct MediaContentEntry { diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index 3ed579cf..947df38a 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -48,7 +48,7 @@ pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = }, }, )] -#[derive(Serialize, Deserialize, Default, Clone, Updater)] +#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)] #[serde(rename_all = "kebab-case")] /// Rate Limit Configuration pub struct RateLimitConfig { diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs index 56d82537..a7481190 100644 --- a/pbs-api-types/src/user.rs +++ b/pbs-api-types/src/user.rs @@ -75,7 +75,7 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.") }, } )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] /// User properties with added list of ApiTokens pub struct UserWithTokens { pub userid: Userid, @@ -114,7 +114,7 @@ pub struct UserWithTokens { }, } )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] /// ApiToken properties. 
pub struct ApiToken { pub tokenid: Authid, From 61c3ac5b1bdea2e5afd3f7c9835dcc5928e8309e Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 5 Jan 2023 10:57:11 +0100 Subject: [PATCH 188/299] fix non-camel-case enums This should have never been started to begin with... --- pbs-api-types/src/network.rs | 48 +++++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index 4b0671c5..361c20e4 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -1,3 +1,5 @@ +use std::fmt; + use serde::{Deserialize, Serialize}; use proxmox_schema::*; @@ -59,42 +61,64 @@ pub enum NetworkConfigMethod { #[api()] #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] -#[allow(non_camel_case_types)] #[repr(u8)] /// Linux Bond Mode pub enum LinuxBondMode { /// Round-robin policy - balance_rr = 0, + BalanceRr = 0, /// Active-backup policy - active_backup = 1, + ActiveBackup = 1, /// XOR policy - balance_xor = 2, + BalanceXor = 2, /// Broadcast policy - broadcast = 3, + Broadcast = 3, /// IEEE 802.3ad Dynamic link aggregation #[serde(rename = "802.3ad")] - ieee802_3ad = 4, + Ieee802_3ad = 4, /// Adaptive transmit load balancing - balance_tlb = 5, + BalanceTlb = 5, /// Adaptive load balancing - balance_alb = 6, + BalanceAlb = 6, +} + +impl fmt::Display for LinuxBondMode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match self { + LinuxBondMode::BalanceRr => "balance-rr", + LinuxBondMode::ActiveBackup => "active-backup", + LinuxBondMode::BalanceXor => "balance-xor", + LinuxBondMode::Broadcast => "broadcast", + LinuxBondMode::Ieee802_3ad => "802.3ad", + LinuxBondMode::BalanceTlb => "balance-tlb", + LinuxBondMode::BalanceAlb => "balance-alb", + }) + } } #[api()] #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] 
-#[allow(non_camel_case_types)] #[repr(u8)] /// Bond Transmit Hash Policy for LACP (802.3ad) pub enum BondXmitHashPolicy { /// Layer 2 - layer2 = 0, + Layer2 = 0, /// Layer 2+3 #[serde(rename = "layer2+3")] - layer2_3 = 1, + Layer2_3 = 1, /// Layer 3+4 #[serde(rename = "layer3+4")] - layer3_4 = 2, + Layer3_4 = 2, +} + +impl fmt::Display for BondXmitHashPolicy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match self { + BondXmitHashPolicy::Layer2 => "layer2", + BondXmitHashPolicy::Layer2_3 => "layer2+3", + BondXmitHashPolicy::Layer3_4 => "layer3+4", + }) + } } #[api()] From dd67737942d151511411216679d1c8dd718205f8 Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Thu, 9 Feb 2023 14:31:15 +0100 Subject: [PATCH 189/299] api-types: add LDAP configuration type The properties are mainly based on the ones from PVE, except: * consistent use of kebab-cases * `mode` replaces deprecated `secure` Signed-off-by: Lukas Wagner --- pbs-api-types/src/ldap.rs | 78 +++++++++++++++++++++++++++++++++++++++ pbs-api-types/src/lib.rs | 5 +++ 2 files changed, 83 insertions(+) create mode 100644 pbs-api-types/src/ldap.rs diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs new file mode 100644 index 00000000..06b8788d --- /dev/null +++ b/pbs-api-types/src/ldap.rs @@ -0,0 +1,78 @@ +use serde::{Deserialize, Serialize}; + +use proxmox_schema::{api, Updater}; + +use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA}; + +#[api()] +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +/// LDAP connection type +pub enum LdapMode { + /// Plaintext LDAP connection + #[serde(rename = "ldap")] + #[default] + Ldap, + /// Secure STARTTLS connection + #[serde(rename = "ldap+starttls")] + StartTls, + /// Secure LDAPS connection + #[serde(rename = "ldaps")] + Ldaps, +} + +#[api( + properties: { + "realm": { + schema: REALM_ID_SCHEMA, + }, + "comment": { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + "verify": { + optional: true, + 
default: false, + } + }, +)] +#[derive(Serialize, Deserialize, Updater, Clone)] +#[serde(rename_all = "kebab-case")] +/// LDAP configuration properties. +pub struct LdapRealmConfig { + #[updater(skip)] + pub realm: String, + /// LDAP server address + pub server1: String, + /// Fallback LDAP server address + #[serde(skip_serializing_if = "Option::is_none")] + pub server2: Option, + /// Port + #[serde(skip_serializing_if = "Option::is_none")] + pub port: Option, + /// Base domain name. Users are searched under this domain using a `subtree search`. + pub base_dn: String, + /// Username attribute. Used to map a ``userid`` to LDAP to an LDAP ``dn``. + pub user_attr: String, + /// Comment + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, + /// Connection security + #[serde(skip_serializing_if = "Option::is_none")] + pub mode: Option, + /// Verify server certificate + #[serde(skip_serializing_if = "Option::is_none")] + pub verify: Option, + /// CA certificate to use for the server. The path can point to + /// either a file, or a directory. If it points to a file, + /// the PEM-formatted X.509 certificate stored at the path + /// will be added as a trusted certificate. + /// If the path points to a directory, + /// the directory replaces the system's default certificate + /// store at `/etc/ssl/certs` - Every file in the directory + /// will be loaded as a trusted certificate. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub capath: Option, + /// Bind domain to use for looking up users + #[serde(skip_serializing_if = "Option::is_none")] + pub bind_dn: Option, +} diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 5e043954..0479b637 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -108,6 +108,9 @@ pub mod file_restore; mod openid; pub use openid::*; +mod ldap; +pub use ldap::*; + mod remote; pub use remote::*; @@ -502,6 +505,8 @@ pub enum RealmType { Pbs, /// An OpenID Connect realm OpenId, + /// An LDAP realm + Ldap, } #[api( From b6b18f65bcda5b06ab771cbfaf2076f07862139c Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Thu, 9 Feb 2023 14:31:18 +0100 Subject: [PATCH 190/299] api-types: add config options for LDAP user sync Signed-off-by: Lukas Wagner --- pbs-api-types/src/ldap.rs | 125 +++++++++++++++++++++++++++++++++++++- 1 file changed, 123 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs index 06b8788d..316b5a65 100644 --- a/pbs-api-types/src/ldap.rs +++ b/pbs-api-types/src/ldap.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use proxmox_schema::{api, Updater}; +use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater}; use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA}; @@ -32,7 +32,19 @@ pub enum LdapMode { "verify": { optional: true, default: false, - } + }, + "sync-defaults-options": { + schema: SYNC_DEFAULTS_STRING_SCHEMA, + optional: true, + }, + "sync-attributes": { + schema: SYNC_ATTRIBUTES_SCHEMA, + optional: true, + }, + "user-classes" : { + optional: true, + schema: USER_CLASSES_SCHEMA, + }, }, )] #[derive(Serialize, Deserialize, Updater, Clone)] @@ -75,4 +87,113 @@ pub struct LdapRealmConfig { /// Bind domain to use for looking up users #[serde(skip_serializing_if = "Option::is_none")] pub bind_dn: Option, + /// Custom LDAP search filter for user sync + 
#[serde(skip_serializing_if = "Option::is_none")] + pub filter: Option, + /// Default options for LDAP sync + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_defaults_options: Option, + /// List of attributes to sync from LDAP to user config + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_attributes: Option, + /// User ``objectClass`` classes to sync + #[serde(skip_serializing_if = "Option::is_none")] + pub user_classes: Option, } + +#[api( + properties: { + "remove-vanished": { + optional: true, + schema: REMOVE_VANISHED_SCHEMA, + }, + }, + +)] +#[derive(Serialize, Deserialize, Updater, Default, Debug)] +#[serde(rename_all = "kebab-case")] +/// Default options for LDAP synchronization runs +pub struct SyncDefaultsOptions { + /// How to handle vanished properties/users + pub remove_vanished: Option, + /// Enable new users after sync + pub enable_new: Option, +} + +#[api()] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +#[serde(rename_all = "kebab-case")] +/// remove-vanished options +pub enum RemoveVanished { + /// Delete ACLs for vanished users + Acl, + /// Remove vanished users + Entry, + /// Remove vanished properties from users (e.g. email) + Properties, +} + +pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options") + .format(&ApiStringFormat::PropertyString( + &SyncDefaultsOptions::API_SCHEMA, + )) + .schema(); + +const REMOVE_VANISHED_DESCRIPTION: &str = + "A semicolon-seperated list of things to remove when they or the user \ +vanishes during user synchronization. The following values are possible: ``entry`` removes the \ +user when not returned from the sync; ``properties`` removes any \ +properties on existing user that do not appear in the source. 
\ +``acl`` removes ACLs when the user is not returned from the sync."; + +pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION) + .format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY)) + .schema(); + +pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new( + "Array of remove-vanished options", + &RemoveVanished::API_SCHEMA, +) +.min_length(1) +.schema(); + +#[api()] +#[derive(Serialize, Deserialize, Updater, Default, Debug)] +#[serde(rename_all = "kebab-case")] +/// Determine which LDAP attributes should be synced to which user attributes +pub struct SyncAttributes { + /// Name of the LDAP attribute containing the user's email address + pub email: Option, + /// Name of the LDAP attribute containing the user's first name + pub firstname: Option, + /// Name of the LDAP attribute containing the user's last name + pub lastname: Option, +} + +const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \ +which LDAP attributes map to which PBS user field. For example, \ +to map the LDAP attribute ``mail`` to PBS's ``email``, write \ +``email=mail``."; + +pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT) + .format(&ApiStringFormat::PropertyString( + &SyncAttributes::API_SCHEMA, + )) + .schema(); + +pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new( + "Array of user classes", + &StringSchema::new("user class").schema(), +) +.min_length(1) +.schema(); + +const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \ +user synchronization. 
For instance, if ``user-classes`` is set to ``person,user``, \ +then user synchronization will consider all LDAP entities \ +where ``objectClass: person`` `or` ``objectClass: user``."; + +pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT) + .format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY)) + .default("inetorgperson,posixaccount,person,user") + .schema(); From 8b3d568bebbff81fc3eba15305c6dd4e1f0d7e7d Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Thu, 9 Feb 2023 14:31:19 +0100 Subject: [PATCH 191/299] server: add LDAP realm sync job This commit adds sync jobs for LDAP user sync. As of now, they can only be started manually. Signed-off-by: Lukas Wagner --- pbs-api-types/src/user.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs index a7481190..21bf0e61 100644 --- a/pbs-api-types/src/user.rs +++ b/pbs-api-types/src/user.rs @@ -172,7 +172,7 @@ impl ApiToken { }, } )] -#[derive(Serialize, Deserialize, Updater)] +#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)] /// User properties. 
pub struct User { #[updater(skip)] From 5720ba2dcea1e71b3f09d1cc8c158be9ca36c7e9 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 1 Feb 2023 16:01:12 +0100 Subject: [PATCH 192/299] use new auth api crate Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/lib.rs | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 4020fc20..b0092813 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -14,6 +14,7 @@ regex.workspace = true serde.workspace = true serde_plain.workspace = true +proxmox-auth-api = { workspace = true, features = [ "api-types" ] } proxmox-lang.workspace=true proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-serde.workspace = true diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 0479b637..ec8b1f34 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -2,6 +2,8 @@ use serde::{Deserialize, Serialize}; +use proxmox_auth_api::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR}; + pub mod common_regex; pub mod percent_encoding; @@ -85,14 +87,14 @@ pub use maintenance::*; mod network; pub use network::*; -#[macro_use] -mod userid; -pub use userid::Authid; -pub use userid::Userid; -pub use userid::{Realm, RealmRef}; -pub use userid::{Tokenname, TokennameRef}; -pub use userid::{Username, UsernameRef}; -pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA}; +pub use proxmox_auth_api::types as userid; +pub use proxmox_auth_api::types::{Authid, Userid}; +pub use proxmox_auth_api::types::{Realm, RealmRef}; +pub use proxmox_auth_api::types::{Tokenname, TokennameRef}; +pub use proxmox_auth_api::types::{Username, UsernameRef}; +pub use proxmox_auth_api::types::{ + PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, +}; #[macro_use] mod user; From 
3aba0d9aa64e821aa6fa9e77d037544c64dc74b4 Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Tue, 28 Mar 2023 16:20:14 +0200 Subject: [PATCH 193/299] api-types: ldap: add verification regex for LDAP DNs Regex was taken from the LDAP implementation in PVE. Signed-off-by: Lukas Wagner --- pbs-api-types/src/ldap.rs | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs index 316b5a65..eabc5249 100644 --- a/pbs-api-types/src/ldap.rs +++ b/pbs-api-types/src/ldap.rs @@ -1,6 +1,8 @@ use serde::{Deserialize, Serialize}; -use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater}; +use proxmox_schema::{ + api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater, +}; use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA}; @@ -45,6 +47,13 @@ pub enum LdapMode { optional: true, schema: USER_CLASSES_SCHEMA, }, + "base-dn" : { + schema: LDAP_DOMAIN_SCHEMA, + }, + "bind-dn" : { + schema: LDAP_DOMAIN_SCHEMA, + optional: true, + } }, )] #[derive(Serialize, Deserialize, Updater, Clone)] @@ -133,6 +142,28 @@ pub enum RemoveVanished { Properties, } +macro_rules! DOMAIN_PART_REGEX { + () => { + r#"("[^"]+"|[^ ,+"/<>;=#][^,+"/<>;=]*[^ ,+"/<>;=]|[^ ,+"/<>;=#])"# + }; +} + +const_regex! 
{ + pub LDAP_DOMAIN_REGEX = concat!( + r#"\w+="#, + DOMAIN_PART_REGEX!(), + r#"(,\s*\w+="#, + DOMAIN_PART_REGEX!(), + ")*" + ); +} + +pub const LDAP_DOMAIN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&LDAP_DOMAIN_REGEX); + +pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain") + .format(&LDAP_DOMAIN_FORMAT) + .schema(); + pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options") .format(&ApiStringFormat::PropertyString( &SyncDefaultsOptions::API_SCHEMA, From 92cca750d9a98da19d2c5c0dcacae9c72e579706 Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Wed, 29 Mar 2023 11:22:41 +0200 Subject: [PATCH 194/299] api-types: ldap: properly anchor DN regex Otherwise, a substring match is enough to fulfill the constraint. Fixes: 3aba0d9a ("api-types: ldap: add verification regex for LDAP DNs") Reported-by: Friedrich Weber Signed-off-by: Lukas Wagner --- pbs-api-types/src/ldap.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs index eabc5249..762f560a 100644 --- a/pbs-api-types/src/ldap.rs +++ b/pbs-api-types/src/ldap.rs @@ -150,11 +150,11 @@ macro_rules! DOMAIN_PART_REGEX { const_regex! { pub LDAP_DOMAIN_REGEX = concat!( - r#"\w+="#, + r#"^\w+="#, DOMAIN_PART_REGEX!(), r#"(,\s*\w+="#, DOMAIN_PART_REGEX!(), - ")*" + ")*$" ); } From cd0d1cbc62e4d33d0c1a4b84abe94fccf555065b Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 29 Mar 2023 11:58:57 +0200 Subject: [PATCH 195/299] api-types: anchor datastore-map regex Fixes: 4c4e5c2b ("api2/tape/restore: enable restore mapping of datastores") Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 72e8d1ee..7f8c1f09 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -34,7 +34,7 @@ const_regex! 
{ pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$"); - pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!()); + pub DATASTORE_MAP_REGEX = concat!(r"^(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); } pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); From 175a8c6d7efe470d84181076ccb89c5595c91a20 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 30 Mar 2023 09:20:42 +0200 Subject: [PATCH 196/299] api types: fix non-capturing group syntax a non capturing group is '(?:)' not '(:?)' so fix that. None of these regexes are used where would use capturing groups. DATASTORE_MAP_REGEX and TAPE_RESTORE_SNAPSHOT_REGEX are only used as api types and BLOCKDEVICE_NAME_REGEX is only used once outside of the api and there we also don't look at the capturing groups. Signed-off-by: Dominik Csapak --- pbs-api-types/src/datastore.rs | 2 +- pbs-api-types/src/lib.rs | 2 +- pbs-api-types/src/tape/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 7f8c1f09..73c4890e 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -34,7 +34,7 @@ const_regex! 
{ pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$"); - pub DATASTORE_MAP_REGEX = concat!(r"^(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); + pub DATASTORE_MAP_REGEX = concat!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); } pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index ec8b1f34..2a5c1932 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -190,7 +190,7 @@ const_regex! { "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$" ); - pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$"; + pub BLOCKDEVICE_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+)|(?:nvme\d+n\d+)$"; pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$"); } diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs index 747b0bcd..99d7cb74 100644 --- a/pbs-api-types/src/tape/mod.rs +++ b/pbs-api-types/src/tape/mod.rs @@ -30,7 +30,7 @@ use proxmox_uuid::Uuid; use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT}; const_regex! { - pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":(:?", BACKUP_NS_PATH_RE!(),")?", SNAPSHOT_PATH_REGEX_STR!(), r"$"); + pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":(?:", BACKUP_NS_PATH_RE!(),")?", SNAPSHOT_PATH_REGEX_STR!(), r"$"); } pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat = From f971b8c1e6b853a36df669c452e28c6ef5c7cf4d Mon Sep 17 00:00:00 2001 From: Stefan Hanreich Date: Tue, 18 Apr 2023 16:59:45 +0200 Subject: [PATCH 197/299] partial fix #3701: sync job: pull: add transfer-last parameter Specifying the transfer-last parameter limits the amount of backups that get synced via the pull command/sync job. 
The parameter specifies how many of the N latest backups should get pulled/synced. All other backups will get skipped. This is particularly useful in situations where the sync target has less disk space than the source. Syncing all backups from the source is not possible if there is not enough disk space on the target. Additionally this can be used for limiting the amount of data transferred, reducing load on the network. The newest backup will always get re-synced, regardless of the setting of the transfer-last parameter. Signed-off-by: Stefan Hanreich --- pbs-api-types/src/jobs.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index cf7618c4..23e19b7b 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -444,6 +444,11 @@ pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new( pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema(); +pub const TRANSFER_LAST_SCHEMA: Schema = + IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others") + .minimum(1) + .schema(); + #[api( properties: { id: { @@ -493,6 +498,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema = schema: GROUP_FILTER_LIST_SCHEMA, optional: true, }, + "transfer-last": { + schema: TRANSFER_LAST_SCHEMA, + optional: true, + }, } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -522,6 +531,8 @@ pub struct SyncJobConfig { pub group_filter: Option>, #[serde(flatten)] pub limit: RateLimitConfig, + #[serde(skip_serializing_if = "Option::is_none")] + pub transfer_last: Option, } impl SyncJobConfig { From 053e83c3c704bf5acfb4d82fb1233ae282ea24ab Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Mon, 8 May 2023 12:01:37 +0200 Subject: [PATCH 198/299] api-types: client: datastore: tools: use proxmox-human-bytes crate Signed-off-by: Lukas Wagner --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/human_byte.rs | 358 
--------------------------- pbs-api-types/src/lib.rs | 3 - pbs-api-types/src/traffic_control.rs | 4 +- 4 files changed, 3 insertions(+), 363 deletions(-) delete mode 100644 pbs-api-types/src/human_byte.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index b0092813..31b69f62 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -15,6 +15,7 @@ serde.workspace = true serde_plain.workspace = true proxmox-auth-api = { workspace = true, features = [ "api-types" ] } +proxmox-human-byte.workspace = true proxmox-lang.workspace=true proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-serde.workspace = true diff --git a/pbs-api-types/src/human_byte.rs b/pbs-api-types/src/human_byte.rs deleted file mode 100644 index 189a645c..00000000 --- a/pbs-api-types/src/human_byte.rs +++ /dev/null @@ -1,358 +0,0 @@ -use anyhow::{bail, Error}; - -use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType}; - -/// Size units for byte sizes -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum SizeUnit { - Byte, - // SI (base 10) - KByte, - MByte, - GByte, - TByte, - PByte, - // IEC (base 2) - Kibi, - Mebi, - Gibi, - Tebi, - Pebi, -} - -impl SizeUnit { - /// Returns the scaling factor - pub fn factor(&self) -> f64 { - match self { - SizeUnit::Byte => 1.0, - // SI (base 10) - SizeUnit::KByte => 1_000.0, - SizeUnit::MByte => 1_000_000.0, - SizeUnit::GByte => 1_000_000_000.0, - SizeUnit::TByte => 1_000_000_000_000.0, - SizeUnit::PByte => 1_000_000_000_000_000.0, - // IEC (base 2) - SizeUnit::Kibi => 1024.0, - SizeUnit::Mebi => 1024.0 * 1024.0, - SizeUnit::Gibi => 1024.0 * 1024.0 * 1024.0, - SizeUnit::Tebi => 1024.0 * 1024.0 * 1024.0 * 1024.0, - SizeUnit::Pebi => 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0, - } - } - - /// gets the biggest possible unit still having a value greater zero before the decimal point - /// 'binary' specifies if IEC (base 2) units should be used or SI (base 10) ones - pub fn 
auto_scale(size: f64, binary: bool) -> SizeUnit { - if binary { - let bits = 64 - (size as u64).leading_zeros(); - match bits { - 51.. => SizeUnit::Pebi, - 41..=50 => SizeUnit::Tebi, - 31..=40 => SizeUnit::Gibi, - 21..=30 => SizeUnit::Mebi, - 11..=20 => SizeUnit::Kibi, - _ => SizeUnit::Byte, - } - } else if size >= 1_000_000_000_000_000.0 { - SizeUnit::PByte - } else if size >= 1_000_000_000_000.0 { - SizeUnit::TByte - } else if size >= 1_000_000_000.0 { - SizeUnit::GByte - } else if size >= 1_000_000.0 { - SizeUnit::MByte - } else if size >= 1_000.0 { - SizeUnit::KByte - } else { - SizeUnit::Byte - } - } -} - -/// Returns the string representation -impl std::fmt::Display for SizeUnit { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - SizeUnit::Byte => write!(f, "B"), - // SI (base 10) - SizeUnit::KByte => write!(f, "KB"), - SizeUnit::MByte => write!(f, "MB"), - SizeUnit::GByte => write!(f, "GB"), - SizeUnit::TByte => write!(f, "TB"), - SizeUnit::PByte => write!(f, "PB"), - // IEC (base 2) - SizeUnit::Kibi => write!(f, "KiB"), - SizeUnit::Mebi => write!(f, "MiB"), - SizeUnit::Gibi => write!(f, "GiB"), - SizeUnit::Tebi => write!(f, "TiB"), - SizeUnit::Pebi => write!(f, "PiB"), - } - } -} - -/// Strips a trailing SizeUnit inclusive trailing whitespace -/// Supports both IEC and SI based scales, the B/b byte symbol is optional. 
-fn strip_unit(v: &str) -> (&str, SizeUnit) { - let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway - - let (v, binary) = match v.strip_suffix('i') { - Some(n) => (n, true), - None => (v, false), - }; - - let mut unit = SizeUnit::Byte; - #[rustfmt::skip] - let value = v.strip_suffix(|c: char| match c { - 'k' | 'K' if !binary => { unit = SizeUnit::KByte; true } - 'm' | 'M' if !binary => { unit = SizeUnit::MByte; true } - 'g' | 'G' if !binary => { unit = SizeUnit::GByte; true } - 't' | 'T' if !binary => { unit = SizeUnit::TByte; true } - 'p' | 'P' if !binary => { unit = SizeUnit::PByte; true } - // binary (IEC recommended) variants - 'k' | 'K' if binary => { unit = SizeUnit::Kibi; true } - 'm' | 'M' if binary => { unit = SizeUnit::Mebi; true } - 'g' | 'G' if binary => { unit = SizeUnit::Gibi; true } - 't' | 'T' if binary => { unit = SizeUnit::Tebi; true } - 'p' | 'P' if binary => { unit = SizeUnit::Pebi; true } - _ => false - }).unwrap_or(v).trim_end(); - - (value, unit) -} - -/// Byte size which can be displayed in a human friendly way -#[derive(Debug, Copy, Clone, UpdaterType, PartialEq)] -pub struct HumanByte { - /// The siginficant value, it does not includes any factor of the `unit` - size: f64, - /// The scale/unit of the value - unit: SizeUnit, -} - -fn verify_human_byte(s: &str) -> Result<(), Error> { - match s.parse::() { - Ok(_) => Ok(()), - Err(err) => bail!("byte-size parse error for '{}': {}", s, err), - } -} -impl ApiType for HumanByte { - const API_SCHEMA: Schema = StringSchema::new( - "Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", - ) - .format(&ApiStringFormat::VerifyFn(verify_human_byte)) - .min_length(1) - .max_length(64) - .schema(); -} - -impl HumanByte { - /// Create instance with size and unit (size must be positive) - pub fn with_unit(size: f64, unit: SizeUnit) -> Result { - if size < 0.0 { - bail!("byte size may not be negative"); - } - Ok(HumanByte { size, unit }) - } 
- - /// Create a new instance with optimal binary unit computed - pub fn new_binary(size: f64) -> Self { - let unit = SizeUnit::auto_scale(size, true); - HumanByte { - size: size / unit.factor(), - unit, - } - } - - /// Create a new instance with optimal decimal unit computed - pub fn new_decimal(size: f64) -> Self { - let unit = SizeUnit::auto_scale(size, false); - HumanByte { - size: size / unit.factor(), - unit, - } - } - - /// Returns the size as u64 number of bytes - pub fn as_u64(&self) -> u64 { - self.as_f64() as u64 - } - - /// Returns the size as f64 number of bytes - pub fn as_f64(&self) -> f64 { - self.size * self.unit.factor() - } - - /// Returns a copy with optimal binary unit computed - pub fn auto_scale_binary(self) -> Self { - HumanByte::new_binary(self.as_f64()) - } - - /// Returns a copy with optimal decimal unit computed - pub fn auto_scale_decimal(self) -> Self { - HumanByte::new_decimal(self.as_f64()) - } -} - -impl From for HumanByte { - fn from(v: u64) -> Self { - HumanByte::new_binary(v as f64) - } -} -impl From for HumanByte { - fn from(v: usize) -> Self { - HumanByte::new_binary(v as f64) - } -} - -impl std::fmt::Display for HumanByte { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let precision = f.precision().unwrap_or(3) as f64; - let precision_factor = 1.0 * 10.0_f64.powf(precision); - // this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet - let size = ((self.size * precision_factor).round()) / precision_factor; - write!(f, "{} {}", size, self.unit) - } -} - -impl std::str::FromStr for HumanByte { - type Err = Error; - - fn from_str(v: &str) -> Result { - let (v, unit) = strip_unit(v); - HumanByte::with_unit(v.parse()?, unit) - } -} - -proxmox_serde::forward_deserialize_to_from_str!(HumanByte); -proxmox_serde::forward_serialize_to_display!(HumanByte); - -#[test] -fn test_human_byte_parser() -> Result<(), Error> { - assert!("-10".parse::().is_err()); // negative size - - fn 
do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> { - let h: HumanByte = v.parse()?; - - if h.size != size { - bail!("got unexpected size for '{}' ({} != {})", v, h.size, size); - } - if h.unit != unit { - bail!( - "got unexpected unit for '{}' ({:?} != {:?})", - v, - h.unit, - unit - ); - } - - let new = h.to_string(); - if new != *as_str { - bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str); - } - Ok(()) - } - fn test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> bool { - match do_test(v, size, unit, as_str) { - Ok(_) => true, - Err(err) => { - eprintln!("{}", err); // makes debugging easier - false - } - } - } - - assert!(test("14", 14.0, SizeUnit::Byte, "14 B")); - assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B")); - assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B")); - assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B")); - assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B")); - - let h: HumanByte = "1.2345678".parse()?; - assert_eq!(&format!("{:.0}", h), "1 B"); - assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit - assert_eq!(&format!("{:.1}", h), "1.2 B"); - assert_eq!(&format!("{:.2}", h), "1.23 B"); - assert_eq!(&format!("{:.3}", h), "1.235 B"); - assert_eq!(&format!("{:.4}", h), "1.2346 B"); - assert_eq!(&format!("{:.5}", h), "1.23457 B"); - assert_eq!(&format!("{:.6}", h), "1.234568 B"); - assert_eq!(&format!("{:.7}", h), "1.2345678 B"); - assert_eq!(&format!("{:.8}", h), "1.2345678 B"); - - assert!(test( - "987654321", - 987654321.0, - SizeUnit::Byte, - "987654321 B" - )); - - assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B")); - assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B")); - assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B")); - assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B")); - - assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB")); - assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB")); - 
assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB")); - - assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB")); - - assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB")); - assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB")); - - assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB")); - assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB")); - assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB")); - - assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB")); - assert!(test("2.3456MiB", 2.3456, SizeUnit::Mebi, "2.346 MiB")); - - assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB")); - - Ok(()) -} - -#[test] -fn test_human_byte_auto_unit_decimal() { - fn convert(b: u64) -> String { - HumanByte::new_decimal(b as f64).to_string() - } - assert_eq!(convert(987), "987 B"); - assert_eq!(convert(1022), "1.022 KB"); - assert_eq!(convert(9_000), "9 KB"); - assert_eq!(convert(1_000), "1 KB"); - assert_eq!(convert(1_000_000), "1 MB"); - assert_eq!(convert(1_000_000_000), "1 GB"); - assert_eq!(convert(1_000_000_000_000), "1 TB"); - assert_eq!(convert(1_000_000_000_000_000), "1 PB"); - - assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.182 GB"); - assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.208 GB"); - assert_eq!(convert((2 << 50) + 500 * (1 << 40)), "2.802 PB"); -} - -#[test] -fn test_human_byte_auto_unit_binary() { - fn convert(b: u64) -> String { - HumanByte::from(b).to_string() - } - assert_eq!(convert(0), "0 B"); - assert_eq!(convert(987), "987 B"); - assert_eq!(convert(1022), "1022 B"); - assert_eq!(convert(9_000), "8.789 KiB"); - assert_eq!(convert(10_000_000), "9.537 MiB"); - assert_eq!(convert(10_000_000_000), "9.313 GiB"); - assert_eq!(convert(10_000_000_000_000), "9.095 TiB"); - - assert_eq!(convert(1 << 10), "1 KiB"); - assert_eq!(convert((1 << 10) * 10), "10 KiB"); - assert_eq!(convert(1 << 20), "1 MiB"); - assert_eq!(convert(1 << 30), "1 GiB"); - assert_eq!(convert(1 << 40), "1 TiB"); - assert_eq!(convert(1 << 50), "1 PiB"); - - 
assert_eq!(convert((1 << 30) + 103 * (1 << 20)), "1.101 GiB"); - assert_eq!(convert((1 << 30) + 128 * (1 << 20)), "1.125 GiB"); - assert_eq!(convert((1 << 40) + 128 * (1 << 30)), "1.125 TiB"); - assert_eq!(convert((2 << 50) + 512 * (1 << 40)), "2.5 PiB"); -} diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 2a5c1932..4764c51a 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -72,9 +72,6 @@ pub use acl::*; mod datastore; pub use datastore::*; -mod human_byte; -pub use human_byte::HumanByte; - mod jobs; pub use jobs::*; diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index 947df38a..24195e44 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -1,10 +1,10 @@ use serde::{Deserialize, Serialize}; +use proxmox_human_byte::HumanByte; use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater}; use crate::{ - HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, - SINGLE_LINE_COMMENT_SCHEMA, + CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, }; pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = From f486f8485c832b6c56de7fa20ca7d049f8a25946 Mon Sep 17 00:00:00 2001 From: Stefan Sterz Date: Mon, 26 Jun 2023 15:17:46 +0200 Subject: [PATCH 199/299] access: ldap check connection on creation and change this commit makes the ldap realm endpoints check whether a new or updated configuration works correctly. it uses the new `check_connection` function to make sure that a configuration can be successfully used to connect to and query an ldap directory. doing so allows us to remove the ldap domain regex. instead of relying on a regex to make sure that a given distinguished name (dn) could be correct, we simply let the ldap directory tell us whether it accepts it. this should also aid with usability as a dn that looks correct could still be invalid. 
this also implicitly removes unauthenticated binds, since the new `check_connection` function does not support those. it will simply bail out of the check if a `bind_dn` but no password is configured. therefore, this is a breaking change. Signed-off-by: Stefan Sterz --- pbs-api-types/src/ldap.rs | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs index 762f560a..f3df90a0 100644 --- a/pbs-api-types/src/ldap.rs +++ b/pbs-api-types/src/ldap.rs @@ -1,8 +1,6 @@ use serde::{Deserialize, Serialize}; -use proxmox_schema::{ - api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater, -}; +use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater}; use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA}; @@ -142,27 +140,7 @@ pub enum RemoveVanished { Properties, } -macro_rules! DOMAIN_PART_REGEX { - () => { - r#"("[^"]+"|[^ ,+"/<>;=#][^,+"/<>;=]*[^ ,+"/<>;=]|[^ ,+"/<>;=#])"# - }; -} - -const_regex! { - pub LDAP_DOMAIN_REGEX = concat!( - r#"^\w+="#, - DOMAIN_PART_REGEX!(), - r#"(,\s*\w+="#, - DOMAIN_PART_REGEX!(), - ")*$" - ); -} - -pub const LDAP_DOMAIN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&LDAP_DOMAIN_REGEX); - -pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain") - .format(&LDAP_DOMAIN_FORMAT) - .schema(); +pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema(); pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options") .format(&ApiStringFormat::PropertyString( From 902a0e8cb5b398bc2df40d7ec5e8c325a8dda4bc Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Fri, 23 Jun 2023 14:30:36 +0200 Subject: [PATCH 200/299] api: include tfa lock status in user list Like in PVE. This means that /access/users is now a 'protected' call to get access to 'tfa.cfg'. 
Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/user.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs index 21bf0e61..595c8702 100644 --- a/pbs-api-types/src/user.rs +++ b/pbs-api-types/src/user.rs @@ -73,9 +73,20 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.") type: ApiToken }, }, + "totp-locked": { + type: bool, + optional: true, + default: false, + description: "True if the user is currently locked out of TOTP factors", + }, + "tfa-locked-until": { + optional: true, + description: "Contains a timestamp until when a user is locked out of 2nd factors", + }, } )] #[derive(Serialize, Deserialize, Clone, PartialEq)] +#[serde(rename_all = "kebab-case")] /// User properties with added list of ApiTokens pub struct UserWithTokens { pub userid: Userid, @@ -93,6 +104,14 @@ pub struct UserWithTokens { pub email: Option, #[serde(skip_serializing_if = "Vec::is_empty", default)] pub tokens: Vec, + #[serde(skip_serializing_if = "bool_is_false")] + pub totp_locked: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub tfa_locked_until: Option, +} + +fn bool_is_false(b: &bool) -> bool { + !b } #[api( From 2226107cc6e3adaa6f9723568a84fc910c1630c7 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Tue, 1 Aug 2023 09:10:45 +0200 Subject: [PATCH 201/299] api-types: set serde defaults for UserWithTokens since `totp_locked` is not wrapped in an `Option` we need to explicitly tell serde about its default Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/user.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs index 595c8702..42f41266 100644 --- a/pbs-api-types/src/user.rs +++ b/pbs-api-types/src/user.rs @@ -104,7 +104,7 @@ pub struct UserWithTokens { pub email: Option, #[serde(skip_serializing_if = "Vec::is_empty", default)] pub tokens: Vec, - #[serde(skip_serializing_if = 
"bool_is_false")] + #[serde(skip_serializing_if = "bool_is_false", default)] pub totp_locked: bool, #[serde(skip_serializing_if = "Option::is_none")] pub tfa_locked_until: Option, From 9fc23c13356720d086fe82884fcbdb347162ad79 Mon Sep 17 00:00:00 2001 From: Christoph Heiss Date: Mon, 7 Aug 2023 09:57:24 +0200 Subject: [PATCH 202/299] api-types: drop unused leftover file Commit 5720ba2d ("use new auth api crate") moved all auth-related code into it's own crate inside the `proxmox` repo, including this file. Thus drop it here, it's not even included in the compile. Signed-off-by: Christoph Heiss --- pbs-api-types/src/userid.rs | 716 ------------------------------------ 1 file changed, 716 deletions(-) delete mode 100644 pbs-api-types/src/userid.rs diff --git a/pbs-api-types/src/userid.rs b/pbs-api-types/src/userid.rs deleted file mode 100644 index 052e66ed..00000000 --- a/pbs-api-types/src/userid.rs +++ /dev/null @@ -1,716 +0,0 @@ -//! Types for user handling. -//! -//! We have [`Username`]s, [`Realm`]s and [`Tokenname`]s. To uniquely identify a user/API token, they -//! must be combined into a [`Userid`] or [`Authid`]. -//! -//! Since they're all string types, they're organized as follows: -//! -//! * [`Username`]: an owned user name. Internally a `String`. -//! * [`UsernameRef`]: a borrowed user name. Pairs with a `Username` the same way a `str` pairs -//! with `String`, meaning you can only make references to it. -//! * [`Realm`]: an owned realm (`String` equivalent). -//! * [`RealmRef`]: a borrowed realm (`str` equivalent). -//! * [`Tokenname`]: an owned API token name (`String` equivalent) -//! * [`TokennameRef`]: a borrowed `Tokenname` (`str` equivalent). -//! * [`Userid`]: an owned user id (`"user@realm"`). -//! * [`Authid`]: an owned Authentication ID (a `Userid` with an optional `Tokenname`). -//! Note that `Userid` and `Authid` do not have a separate borrowed type. -//! -//! 
Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be -//! compared directly. If a direct comparison is really required, they can be compared as strings -//! via the `as_str()` method. [`Realm`]s, [`Userid`]s and [`Authid`]s on the other hand can be -//! compared with each other, as in those cases the comparison has meaning. - -use std::borrow::Borrow; -use std::fmt; - -use anyhow::{bail, format_err, Error}; -use lazy_static::lazy_static; -use serde::{Deserialize, Serialize}; - -use proxmox_schema::{ - api, const_regex, ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType, -}; - -// we only allow a limited set of characters -// colon is not allowed, because we store usernames in -// colon separated lists)! -// slash is not allowed because it is used as pve API delimiter -// also see "man useradd" -#[macro_export] -macro_rules! USER_NAME_REGEX_STR { - () => { - r"(?:[^\s:/[:cntrl:]]+)" - }; -} -#[macro_export] -macro_rules! GROUP_NAME_REGEX_STR { - () => { - USER_NAME_REGEX_STR!() - }; -} -#[macro_export] -macro_rules! TOKEN_NAME_REGEX_STR { - () => { - PROXMOX_SAFE_ID_REGEX_STR!() - }; -} -#[macro_export] -macro_rules! USER_ID_REGEX_STR { - () => { - concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!()) - }; -} -#[macro_export] -macro_rules! APITOKEN_ID_REGEX_STR { - () => { - concat!(USER_ID_REGEX_STR!(), r"!", TOKEN_NAME_REGEX_STR!()) - }; -} - -const_regex! 
{ - pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$"); - pub PROXMOX_TOKEN_NAME_REGEX = concat!(r"^", TOKEN_NAME_REGEX_STR!(), r"$"); - pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$"); - pub PROXMOX_APITOKEN_ID_REGEX = concat!(r"^", APITOKEN_ID_REGEX_STR!(), r"$"); - pub PROXMOX_AUTH_ID_REGEX = concat!(r"^", r"(?:", USER_ID_REGEX_STR!(), r"|", APITOKEN_ID_REGEX_STR!(), r")$"); - pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$"); -} - -pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX); -pub const PROXMOX_TOKEN_NAME_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_TOKEN_NAME_REGEX); - -pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX); -pub const PROXMOX_TOKEN_ID_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_APITOKEN_ID_REGEX); -pub const PROXMOX_AUTH_ID_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_AUTH_ID_REGEX); - -pub const PROXMOX_TOKEN_ID_SCHEMA: Schema = StringSchema::new("API Token ID") - .format(&PROXMOX_TOKEN_ID_FORMAT) - .min_length(3) - .max_length(64) - .schema(); - -pub const PROXMOX_TOKEN_NAME_SCHEMA: Schema = StringSchema::new("API Token name") - .format(&PROXMOX_TOKEN_NAME_FORMAT) - .min_length(3) - .max_length(64) - .schema(); - -pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX); - -pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID") - .format(&PROXMOX_GROUP_ID_FORMAT) - .min_length(3) - .max_length(64) - .schema(); - -pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema = - StringSchema::new("Authentication domain ID") - .format(&super::PROXMOX_SAFE_ID_FORMAT) - .min_length(3) - .max_length(32); -pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema(); - -#[api( - type: String, - format: &PROXMOX_USER_NAME_FORMAT, - 
min_length: 1, -)] -/// The user name part of a user id. -/// -/// This alone does NOT uniquely identify the user and therefore does not implement `Eq`. In order -/// to compare user names directly, they need to be explicitly compared as strings by calling -/// `.as_str()`. -/// -/// ```compile_fail -/// fn test(a: Username, b: Username) -> bool { -/// a == b // illegal and does not compile -/// } -/// ``` -#[derive(Clone, Debug, Hash, Deserialize, Serialize)] -pub struct Username(String); - -/// A reference to a user name part of a user id. This alone does NOT uniquely identify the user. -/// -/// This is like a `str` to the `String` of a [`Username`]. -#[derive(Debug, Hash)] -pub struct UsernameRef(str); - -impl UsernameRef { - fn new(s: &str) -> &Self { - unsafe { &*(s as *const str as *const UsernameRef) } - } - - pub fn as_str(&self) -> &str { - &self.0 - } -} - -impl std::ops::Deref for Username { - type Target = UsernameRef; - - fn deref(&self) -> &UsernameRef { - self.borrow() - } -} - -impl Borrow for Username { - fn borrow(&self) -> &UsernameRef { - UsernameRef::new(self.0.as_str()) - } -} - -impl AsRef for Username { - fn as_ref(&self) -> &UsernameRef { - self.borrow() - } -} - -impl ToOwned for UsernameRef { - type Owned = Username; - - fn to_owned(&self) -> Self::Owned { - Username(self.0.to_owned()) - } -} - -impl TryFrom for Username { - type Error = Error; - - fn try_from(s: String) -> Result { - if !PROXMOX_USER_NAME_REGEX.is_match(&s) { - bail!("invalid user name"); - } - - Ok(Self(s)) - } -} - -impl<'a> TryFrom<&'a str> for &'a UsernameRef { - type Error = Error; - - fn try_from(s: &'a str) -> Result<&'a UsernameRef, Error> { - if !PROXMOX_USER_NAME_REGEX.is_match(s) { - bail!("invalid name in user id"); - } - - Ok(UsernameRef::new(s)) - } -} - -#[api(schema: PROXMOX_AUTH_REALM_SCHEMA)] -/// An authentication realm. 
-#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)] -pub struct Realm(String); - -/// A reference to an authentication realm. -/// -/// This is like a `str` to the `String` of a `Realm`. -#[derive(Debug, Hash, Eq, PartialEq)] -pub struct RealmRef(str); - -impl RealmRef { - fn new(s: &str) -> &Self { - unsafe { &*(s as *const str as *const RealmRef) } - } - - pub fn as_str(&self) -> &str { - &self.0 - } -} - -impl std::ops::Deref for Realm { - type Target = RealmRef; - - fn deref(&self) -> &RealmRef { - self.borrow() - } -} - -impl Borrow for Realm { - fn borrow(&self) -> &RealmRef { - RealmRef::new(self.0.as_str()) - } -} - -impl AsRef for Realm { - fn as_ref(&self) -> &RealmRef { - self.borrow() - } -} - -impl ToOwned for RealmRef { - type Owned = Realm; - - fn to_owned(&self) -> Self::Owned { - Realm(self.0.to_owned()) - } -} - -impl TryFrom for Realm { - type Error = Error; - - fn try_from(s: String) -> Result { - PROXMOX_AUTH_REALM_STRING_SCHEMA - .check_constraints(&s) - .map_err(|_| format_err!("invalid realm"))?; - - Ok(Self(s)) - } -} - -impl<'a> TryFrom<&'a str> for &'a RealmRef { - type Error = Error; - - fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> { - PROXMOX_AUTH_REALM_STRING_SCHEMA - .check_constraints(s) - .map_err(|_| format_err!("invalid realm"))?; - - Ok(RealmRef::new(s)) - } -} - -impl PartialEq for Realm { - fn eq(&self, rhs: &str) -> bool { - self.0 == rhs - } -} - -impl PartialEq<&str> for Realm { - fn eq(&self, rhs: &&str) -> bool { - self.0 == *rhs - } -} - -impl PartialEq for RealmRef { - fn eq(&self, rhs: &str) -> bool { - self.0 == *rhs - } -} - -impl PartialEq<&str> for RealmRef { - fn eq(&self, rhs: &&str) -> bool { - self.0 == **rhs - } -} - -impl PartialEq for Realm { - fn eq(&self, rhs: &RealmRef) -> bool { - self.0 == rhs.0 - } -} - -impl PartialEq for RealmRef { - fn eq(&self, rhs: &Realm) -> bool { - self.0 == rhs.0 - } -} - -impl PartialEq for &RealmRef { - fn eq(&self, rhs: &Realm) -> bool { - self.0 
== rhs.0 - } -} - -#[api( - type: String, - format: &PROXMOX_TOKEN_NAME_FORMAT, -)] -/// The token ID part of an API token authentication id. -/// -/// This alone does NOT uniquely identify the API token - use a full `Authid` for such use cases. -#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq, Deserialize, Serialize)] -pub struct Tokenname(String); - -/// A reference to a token name part of an authentication id. This alone does NOT uniquely identify -/// the user. -/// -/// This is like a `str` to the `String` of a [`Tokenname`]. -#[derive(Debug, Hash)] -pub struct TokennameRef(str); - -#[doc(hidden)] -/// ```compile_fail -/// let a: Username = unsafe { std::mem::zeroed() }; -/// let b: Username = unsafe { std::mem::zeroed() }; -/// let _ = ::eq(&a, &b); -/// ``` -/// -/// ```compile_fail -/// let a: &UsernameRef = unsafe { std::mem::zeroed() }; -/// let b: &UsernameRef = unsafe { std::mem::zeroed() }; -/// let _ = <&UsernameRef as PartialEq>::eq(a, b); -/// ``` -/// -/// ```compile_fail -/// let a: &UsernameRef = unsafe { std::mem::zeroed() }; -/// let b: &UsernameRef = unsafe { std::mem::zeroed() }; -/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b); -/// ``` -struct _AssertNoEqImpl; - -impl TokennameRef { - fn new(s: &str) -> &Self { - unsafe { &*(s as *const str as *const TokennameRef) } - } - - pub fn as_str(&self) -> &str { - &self.0 - } -} - -impl std::ops::Deref for Tokenname { - type Target = TokennameRef; - - fn deref(&self) -> &TokennameRef { - self.borrow() - } -} - -impl Borrow for Tokenname { - fn borrow(&self) -> &TokennameRef { - TokennameRef::new(self.0.as_str()) - } -} - -impl AsRef for Tokenname { - fn as_ref(&self) -> &TokennameRef { - self.borrow() - } -} - -impl ToOwned for TokennameRef { - type Owned = Tokenname; - - fn to_owned(&self) -> Self::Owned { - Tokenname(self.0.to_owned()) - } -} - -impl TryFrom for Tokenname { - type Error = Error; - - fn try_from(s: String) -> Result { - if !PROXMOX_TOKEN_NAME_REGEX.is_match(&s) { - 
bail!("invalid token name"); - } - - Ok(Self(s)) - } -} - -impl<'a> TryFrom<&'a str> for &'a TokennameRef { - type Error = Error; - - fn try_from(s: &'a str) -> Result<&'a TokennameRef, Error> { - if !PROXMOX_TOKEN_NAME_REGEX.is_match(s) { - bail!("invalid token name in user id"); - } - - Ok(TokennameRef::new(s)) - } -} - -/// A complete user id consisting of a user name and a realm -#[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd, UpdaterType)] -pub struct Userid { - data: String, - name_len: usize, -} - -impl ApiType for Userid { - const API_SCHEMA: Schema = StringSchema::new("User ID") - .format(&PROXMOX_USER_ID_FORMAT) - .min_length(3) - .max_length(64) - .schema(); -} - -impl Userid { - const fn new(data: String, name_len: usize) -> Self { - Self { data, name_len } - } - - pub fn name(&self) -> &UsernameRef { - UsernameRef::new(&self.data[..self.name_len]) - } - - pub fn realm(&self) -> &RealmRef { - RealmRef::new(&self.data[(self.name_len + 1)..]) - } - - pub fn as_str(&self) -> &str { - &self.data - } - - /// Get the "root@pam" user id. - pub fn root_userid() -> &'static Self { - &ROOT_USERID - } -} - -lazy_static! 
{ - pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4); -} - -impl From for Userid { - fn from(authid: Authid) -> Self { - authid.user - } -} - -impl From<(Username, Realm)> for Userid { - fn from(parts: (Username, Realm)) -> Self { - Self::from((parts.0.as_ref(), parts.1.as_ref())) - } -} - -impl From<(&UsernameRef, &RealmRef)> for Userid { - fn from(parts: (&UsernameRef, &RealmRef)) -> Self { - let data = format!("{}@{}", parts.0.as_str(), parts.1.as_str()); - let name_len = parts.0.as_str().len(); - Self { data, name_len } - } -} - -impl fmt::Display for Userid { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.data.fmt(f) - } -} - -impl std::str::FromStr for Userid { - type Err = Error; - - fn from_str(id: &str) -> Result { - let name_len = id - .as_bytes() - .iter() - .rposition(|&b| b == b'@') - .ok_or_else(|| format_err!("not a valid user id"))?; - - let name = &id[..name_len]; - let realm = &id[(name_len + 1)..]; - - if !PROXMOX_USER_NAME_REGEX.is_match(name) { - bail!("invalid user name in user id"); - } - - PROXMOX_AUTH_REALM_STRING_SCHEMA - .check_constraints(realm) - .map_err(|_| format_err!("invalid realm in user id"))?; - - Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm)))) - } -} - -impl TryFrom for Userid { - type Error = Error; - - fn try_from(data: String) -> Result { - let name_len = data - .as_bytes() - .iter() - .rposition(|&b| b == b'@') - .ok_or_else(|| format_err!("not a valid user id"))?; - - if !PROXMOX_USER_NAME_REGEX.is_match(&data[..name_len]) { - bail!("invalid user name in user id"); - } - - PROXMOX_AUTH_REALM_STRING_SCHEMA - .check_constraints(&data[(name_len + 1)..]) - .map_err(|_| format_err!("invalid realm in user id"))?; - - Ok(Self { data, name_len }) - } -} - -impl PartialEq for Userid { - fn eq(&self, rhs: &str) -> bool { - self.data == *rhs - } -} - -impl PartialEq<&str> for Userid { - fn eq(&self, rhs: &&str) -> bool { - *self == **rhs - } -} - -impl PartialEq for Userid { 
- fn eq(&self, rhs: &String) -> bool { - self == rhs.as_str() - } -} - -/// A complete authentication id consisting of a user id and an optional token name. -#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType, Ord, PartialOrd)] -pub struct Authid { - user: Userid, - tokenname: Option, -} - -impl ApiType for Authid { - const API_SCHEMA: Schema = StringSchema::new("Authentication ID") - .format(&PROXMOX_AUTH_ID_FORMAT) - .min_length(3) - .max_length(64) - .schema(); -} - -impl Authid { - const fn new(user: Userid, tokenname: Option) -> Self { - Self { user, tokenname } - } - - pub fn user(&self) -> &Userid { - &self.user - } - - pub fn is_token(&self) -> bool { - self.tokenname.is_some() - } - - pub fn tokenname(&self) -> Option<&TokennameRef> { - self.tokenname.as_deref() - } - - /// Get the "root@pam" auth id. - pub fn root_auth_id() -> &'static Self { - &ROOT_AUTHID - } -} - -lazy_static! { - pub static ref ROOT_AUTHID: Authid = Authid::from(Userid::new("root@pam".to_string(), 4)); -} - -impl From for Authid { - fn from(parts: Userid) -> Self { - Self::new(parts, None) - } -} - -impl From<(Userid, Option)> for Authid { - fn from(parts: (Userid, Option)) -> Self { - Self::new(parts.0, parts.1) - } -} - -impl fmt::Display for Authid { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match &self.tokenname { - Some(token) => write!(f, "{}!{}", self.user, token.as_str()), - None => self.user.fmt(f), - } - } -} - -impl std::str::FromStr for Authid { - type Err = Error; - - fn from_str(id: &str) -> Result { - let name_len = id - .as_bytes() - .iter() - .rposition(|&b| b == b'@') - .ok_or_else(|| format_err!("not a valid user id"))?; - - let realm_end = id - .as_bytes() - .iter() - .rposition(|&b| b == b'!') - .map(|pos| if pos < name_len { id.len() } else { pos }) - .unwrap_or_else(|| id.len()); - - if realm_end == id.len() - 1 { - bail!("empty token name in userid"); - } - - let user = Userid::from_str(&id[..realm_end])?; - - if id.len() > realm_end { - 
let token = Tokenname::try_from(id[(realm_end + 1)..].to_string())?; - Ok(Self::new(user, Some(token))) - } else { - Ok(Self::new(user, None)) - } - } -} - -impl TryFrom for Authid { - type Error = Error; - - fn try_from(mut data: String) -> Result { - let name_len = data - .as_bytes() - .iter() - .rposition(|&b| b == b'@') - .ok_or_else(|| format_err!("not a valid user id"))?; - - let realm_end = data - .as_bytes() - .iter() - .rposition(|&b| b == b'!') - .map(|pos| if pos < name_len { data.len() } else { pos }) - .unwrap_or_else(|| data.len()); - - if realm_end == data.len() - 1 { - bail!("empty token name in userid"); - } - - let tokenname = if data.len() > realm_end { - Some(Tokenname::try_from(data[(realm_end + 1)..].to_string())?) - } else { - None - }; - - data.truncate(realm_end); - - let user: Userid = data.parse()?; - - Ok(Self { user, tokenname }) - } -} - -#[test] -fn test_token_id() { - let userid: Userid = "test@pam".parse().expect("parsing Userid failed"); - assert_eq!(userid.name().as_str(), "test"); - assert_eq!(userid.realm(), "pam"); - assert_eq!(userid, "test@pam"); - - let auth_id: Authid = "test@pam".parse().expect("parsing user Authid failed"); - assert_eq!(auth_id.to_string(), "test@pam".to_string()); - assert!(!auth_id.is_token()); - - assert_eq!(auth_id.user(), &userid); - - let user_auth_id = Authid::from(userid.clone()); - assert_eq!(user_auth_id, auth_id); - assert!(!user_auth_id.is_token()); - - let auth_id: Authid = "test@pam!bar".parse().expect("parsing token Authid failed"); - let token_userid = auth_id.user(); - assert_eq!(&userid, token_userid); - assert!(auth_id.is_token()); - assert_eq!( - auth_id.tokenname().expect("Token has tokenname").as_str(), - TokennameRef::new("bar").as_str() - ); - assert_eq!(auth_id.to_string(), "test@pam!bar".to_string()); -} - -proxmox_serde::forward_deserialize_to_from_str!(Userid); -proxmox_serde::forward_serialize_to_display!(Userid); - -proxmox_serde::forward_deserialize_to_from_str!(Authid); 
-proxmox_serde::forward_serialize_to_display!(Authid); From 8e3e83318b73e54c34c734f5017fbe7d3b187a03 Mon Sep 17 00:00:00 2001 From: Stefan Sterz Date: Thu, 10 Aug 2023 17:37:04 +0200 Subject: [PATCH 203/299] fix: api-types: add support for lto 9 tape density lto 9 tapes have a new density code which leads to these tapes not being recognized properly. add the new density code and TapeDensity to improve lto 9 support. since the documentation states that we support lto 5 and above this constitutes a bug fix for lto 9 support. Signed-off-by: Stefan Sterz --- pbs-api-types/src/tape/drive.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index d841505a..ea2cbbd8 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -130,6 +130,8 @@ pub enum TapeDensity { LTO7M8, /// LTO8 LTO8, + /// LTO9 + LTO9, } impl TryFrom for TapeDensity { @@ -147,6 +149,7 @@ impl TryFrom for TapeDensity { 0x5c => TapeDensity::LTO7, 0x5d => TapeDensity::LTO7M8, 0x5e => TapeDensity::LTO8, + 0x60 => TapeDensity::LTO9, _ => bail!("unknown tape density code 0x{:02x}", value), }; Ok(density) From 51bae22b4d6e65387abb529f083e482e6be9ece7 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 30 Aug 2023 13:34:51 +0200 Subject: [PATCH 204/299] TaskListItem: derive Clone and PartialEq --- pbs-api-types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 4764c51a..508ee111 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -432,7 +432,7 @@ pub enum TaskStateType { upid: { schema: UPID::API_SCHEMA }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] /// Task properties. 
pub struct TaskListItem { pub upid: String, From c8797043373d1032dc1a29971edf3f2c9e60baae Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sat, 2 Sep 2023 16:24:18 +0200 Subject: [PATCH 205/299] AclListItem: derive Clone and PartialEq Signed-off-by: Dietmar Maurer --- pbs-api-types/src/acl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index 3142f6fc..8bbd2958 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -280,7 +280,7 @@ pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' prope } } )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] /// ACL list entry. pub struct AclListItem { pub path: String, From ee4d9a55673fca07e3779530b7df85275370fa7a Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 27 Sep 2023 18:11:58 +0200 Subject: [PATCH 206/299] pbs-api-types: move node status types from src/api2/types/mod.rs Signed-off-by: Dietmar Maurer --- pbs-api-types/src/lib.rs | 3 ++ pbs-api-types/src/node.rs | 100 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+) create mode 100644 pbs-api-types/src/node.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 508ee111..4306eca3 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -84,6 +84,9 @@ pub use maintenance::*; mod network; pub use network::*; +mod node; +pub use node::*; + pub use proxmox_auth_api::types as userid; pub use proxmox_auth_api::types::{Authid, Userid}; pub use proxmox_auth_api::types::{Realm, RealmRef}; diff --git a/pbs-api-types/src/node.rs b/pbs-api-types/src/node.rs new file mode 100644 index 00000000..704215bb --- /dev/null +++ b/pbs-api-types/src/node.rs @@ -0,0 +1,100 @@ +use serde::{Deserialize, Serialize}; +use proxmox_schema::*; + +use crate::StorageStatus; + + +#[api] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +/// Node memory usage 
counters +pub struct NodeMemoryCounters { + /// Total memory + pub total: u64, + /// Used memory + pub used: u64, + /// Free memory + pub free: u64, +} + +#[api] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +/// Node swap usage counters +pub struct NodeSwapCounters { + /// Total swap + pub total: u64, + /// Used swap + pub used: u64, + /// Free swap + pub free: u64, +} + +#[api] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +/// Contains general node information such as the fingerprint` +pub struct NodeInformation { + /// The SSL Fingerprint + pub fingerprint: String, +} + +#[api] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +/// Information about the CPU +pub struct NodeCpuInformation { + /// The CPU model + pub model: String, + /// The number of CPU sockets + pub sockets: usize, + /// The number of CPU cores (incl. threads) + pub cpus: usize, +} + +#[api( + properties: { + memory: { + type: NodeMemoryCounters, + }, + root: { + type: StorageStatus, + }, + swap: { + type: NodeSwapCounters, + }, + loadavg: { + type: Array, + items: { + type: Number, + description: "the load", + } + }, + cpuinfo: { + type: NodeCpuInformation, + }, + info: { + type: NodeInformation, + } + }, +)] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +/// The Node status +pub struct NodeStatus { + pub memory: NodeMemoryCounters, + pub root: StorageStatus, + pub swap: NodeSwapCounters, + /// The current uptime of the server. + pub uptime: u64, + /// Load for 1, 5 and 15 minutes. + pub loadavg: [f64; 3], + /// The current kernel version. + pub kversion: String, + /// Total CPU usage since last query. + pub cpu: f64, + /// Total IO wait since last query. 
+ pub wait: f64, + pub cpuinfo: NodeCpuInformation, + pub info: NodeInformation, +} From 3b42bca41053b8dd0fc37e328ead6b6f0eb20ab3 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 9 Oct 2023 08:17:46 +0200 Subject: [PATCH 207/299] move MetricServerInfo definition to pbs-api-types And derive Clone, Eq and Ord so that we can sort the list in the GUI. Signed-off-by: Dietmar Maurer --- pbs-api-types/src/metrics.rs | 43 ++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs index f5cfe95d..be5ff27a 100644 --- a/pbs-api-types/src/metrics.rs +++ b/pbs-api-types/src/metrics.rs @@ -146,3 +146,46 @@ pub struct InfluxDbHttp { #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, } + + +#[api] +#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)] +/// Type of the metric server +pub enum MetricServerType { + /// InfluxDB HTTP + #[serde(rename = "influxdb-http")] + InfluxDbHttp, + /// InfluxDB UDP + #[serde(rename = "influxdb-udp")] + InfluxDbUdp, +} + +#[api( + properties: { + name: { + schema: METRIC_SERVER_ID_SCHEMA, + }, + "type": { + type: MetricServerType, + }, + comment: { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + }, +)] +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +#[serde(rename_all = "kebab-case")] +/// Basic information about a metric server thats available for all types +pub struct MetricServerInfo { + pub name: String, + #[serde(rename = "type")] + pub ty: MetricServerType, + /// Enables or disables the metrics server + #[serde(skip_serializing_if = "Option::is_none")] + pub enable: Option, + /// The target server + pub server: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, +} From 1fd995c54cb693701f117f7b01ae53c5d7024b88 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 11 Oct 2023 11:37:15 +0200 Subject: [PATCH 208/299] Interface: derive 
Clone + PartialEq (for GUI) Signed-off-by: Dietmar Maurer --- pbs-api-types/src/network.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index 361c20e4..8e2897b5 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -238,7 +238,7 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = }, } )] -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] /// Network Interface configuration pub struct Interface { /// Autostart interface From 9404f0ff9f777603b491cfb4431cd0bc447e62b9 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 11 Oct 2023 11:38:01 +0200 Subject: [PATCH 209/299] Interface: fix deserialize (add default) Signed-off-by: Dietmar Maurer --- pbs-api-types/src/network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index 8e2897b5..a8294a59 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -268,9 +268,9 @@ pub struct Interface { /// IPv6 gateway pub gateway6: Option, - #[serde(skip_serializing_if = "Vec::is_empty")] + #[serde(default, skip_serializing_if = "Vec::is_empty")] pub options: Vec, - #[serde(skip_serializing_if = "Vec::is_empty")] + #[serde(default, skip_serializing_if = "Vec::is_empty")] pub options6: Vec, #[serde(skip_serializing_if = "Option::is_none")] From 235cde7f03e067429a0b265b324918b8ead188db Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 12 Oct 2023 12:39:18 +0200 Subject: [PATCH 210/299] Interface: add missing serde skip_serializing_if to bond_xmit_hash_policy Signed-off-by: Dietmar Maurer --- pbs-api-types/src/network.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index a8294a59..e3a5e481 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -295,6 +295,7 @@ pub struct 
Interface { #[serde(skip_serializing_if = "Option::is_none")] #[serde(rename = "bond-primary")] pub bond_primary: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub bond_xmit_hash_policy: Option, } From d6dab3cf2424aacdc6b125f50b32172a06d16c12 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 15 Oct 2023 09:25:26 +0200 Subject: [PATCH 211/299] TrafficControlRule: derive Clone and PartialEq (for GUI) --- pbs-api-types/src/traffic_control.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index 24195e44..7a0dedd2 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -100,7 +100,7 @@ impl RateLimitConfig { }, }, )] -#[derive(Serialize, Deserialize, Updater)] +#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)] #[serde(rename_all = "kebab-case")] /// Traffic control rule pub struct TrafficControlRule { From a2234de54ab6b02e6e0c48c05201338699648e20 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 15 Oct 2023 11:07:30 +0200 Subject: [PATCH 212/299] move TrafficControlCurrentRate to pbs-api-types Signed-off-by: Dietmar Maurer --- pbs-api-types/src/traffic_control.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index 7a0dedd2..c0f3b314 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -120,3 +120,22 @@ pub struct TrafficControlRule { #[serde(skip_serializing_if = "Option::is_none")] pub timeframe: Option>, } + +#[api( + properties: { + config: { + type: TrafficControlRule, + }, + }, +)] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Traffic control rule config with current rates +pub struct TrafficControlCurrentRate { + #[serde(flatten)] + pub config: TrafficControlRule, + /// Current ingress rate in bytes/second + pub 
cur_rate_in: u64, /// Current egress rate in bytes/second pub cur_rate_out: u64, } From a5f67f200dc5df1608cee0e53cfd6ef9d3fb3748 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 15 Oct 2023 11:09:42 +0200 Subject: [PATCH 213/299] TrafficControlCurrentRate: derive Clone and PartialEq (for GUI) Signed-off-by: Dietmar Maurer --- pbs-api-types/src/traffic_control.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index c0f3b314..c97d9ef8 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -128,7 +128,7 @@ pub struct TrafficControlRule { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Clone, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "kebab-case")] /// Traffic control rule config with current rates pub struct TrafficControlCurrentRate { From ecca38b94b93988687ac47a1f8c04fd3aa75150b Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 12 Nov 2023 11:23:56 +0100 Subject: [PATCH 214/299] DatastoreNotify: fix serde attributes Signed-off-by: Dietmar Maurer --- pbs-api-types/src/jobs.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 23e19b7b..5e0b215a 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -138,12 +138,16 @@ pub enum Notify { /// Datastore notify settings pub struct DatastoreNotify { /// Garbage collection settings + #[serde(skip_serializing_if = "Option::is_none")] pub gc: Option, /// Verify job setting + #[serde(skip_serializing_if = "Option::is_none")] pub verify: Option, /// Sync job setting + #[serde(skip_serializing_if = "Option::is_none")] pub sync: Option, /// Prune job setting + #[serde(skip_serializing_if = "Option::is_none")] pub prune: Option, } From 7cb2d72b97a662cb5dbdb94086d17ee433af1453 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sun, 12 Nov 2023 12:02:19 +0100 Subject: [PATCH
215/299] DatastoreTuning: fix serde attributes Signed-off-by: Dietmar Maurer --- pbs-api-types/src/datastore.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 73c4890e..db9faa2c 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -213,7 +213,9 @@ pub enum DatastoreFSyncLevel { /// Datastore tuning options pub struct DatastoreTuning { /// Iterate chunks in this order + #[serde(skip_serializing_if = "Option::is_none")] pub chunk_order: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub sync_level: Option, } From 3fca8ef10d50878eafeef4dc4e16c26a226364c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 4 Jul 2023 11:45:05 +0200 Subject: [PATCH 216/299] apt: use `apt changelog` for changelog fetching MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit support for it got added to Proxmox repositories, so there is no need to use custom logic and manual fetching for this anymore. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 4306eca3..ebd5550d 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -398,8 +398,6 @@ pub struct APTUpdateInfo { pub priority: String, /// Package section pub section: String, - /// URL under which the package's changelog can be retrieved - pub change_log_url: String, /// Custom extra field for additional package information #[serde(skip_serializing_if = "Option::is_none")] pub extra_info: Option, From dca6c270a0df05a70364880c5ba21e77cb59ca10 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 15 Nov 2023 16:22:21 +0100 Subject: [PATCH 217/299] pbs-api-types: derive Clone and PartialEq for job config/status types (for GUI) Signed-off-by: Dietmar Maurer --- pbs-api-types/src/jobs.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 5e0b215a..f3e64ee4 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -203,7 +203,7 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = }, } )] -#[derive(Serialize, Deserialize, Updater)] +#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Verification Job pub struct VerificationJobConfig { @@ -252,7 +252,7 @@ impl VerificationJobConfig { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Status of Verification Job pub struct VerificationJobStatus { @@ -306,7 +306,7 @@ pub struct VerificationJobStatus { }, } )] -#[derive(Serialize, Deserialize, Clone, Updater)] +#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] #[serde(rename_all = "kebab-case")] /// Tape Backup Job Setup pub struct TapeBackupJobSetup { @@ -348,7 +348,7 @@ pub struct TapeBackupJobSetup { }, } )] 
-#[derive(Serialize, Deserialize, Clone, Updater)] +#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] #[serde(rename_all = "kebab-case")] /// Tape Backup Job pub struct TapeBackupJobConfig { @@ -372,7 +372,7 @@ pub struct TapeBackupJobConfig { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Status of Tape Backup Job pub struct TapeBackupJobStatus { @@ -643,7 +643,7 @@ impl KeepOptions { }, } )] -#[derive(Serialize, Deserialize, Default, Updater)] +#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Common pruning options pub struct PruneJobOptions { @@ -697,7 +697,7 @@ impl PruneJobOptions { }, }, )] -#[derive(Deserialize, Serialize, Updater)] +#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Prune configuration. pub struct PruneJobConfig { @@ -741,7 +741,7 @@ fn is_false(b: &bool) -> bool { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Status of prune job pub struct PruneJobStatus { From 486415f5175ae43f021de59c23edd5eaa65efda0 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 16 Nov 2023 17:45:09 +0100 Subject: [PATCH 218/299] PruneJobConfig: remove stale optional flag from the API macro. The property is not optional - it is defined as "String". 
Signed-off-by: Dietmar Maurer --- pbs-api-types/src/jobs.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index f3e64ee4..7b5a1f3d 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -686,7 +686,6 @@ impl PruneJobOptions { }, schedule: { schema: PRUNE_SCHEDULE_SCHEMA, - optional: true, }, comment: { optional: true, From 56575dfc628c685b19203293d0fca1aa384d6822 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 23 Nov 2023 09:52:05 +0100 Subject: [PATCH 219/299] pbs-api-types: derive Clone and PartialEq for BackupContent, SnapshotVerifyState, SnapshotListItem and GroupListItem Signed-off-by: Dietmar Maurer --- pbs-api-types/src/datastore.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index db9faa2c..1f619c9d 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -378,7 +378,7 @@ pub struct DataStoreListItem { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Basic information about archive files inside a backup snapshot. pub struct BackupContent { @@ -412,7 +412,7 @@ pub enum VerifyState { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] /// Task properties. pub struct SnapshotVerifyState { /// UPID of the verify task @@ -1076,7 +1076,7 @@ impl std::str::FromStr for BackupPart { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Basic information about backup snapshot. pub struct SnapshotListItem { @@ -1122,7 +1122,7 @@ pub struct SnapshotListItem { }, }, )] -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] /// Basic information about a backup group. 
pub struct GroupListItem { From bf9b3e7ac0af210406608b49fe253e408af241ea Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Tue, 21 Nov 2023 15:31:54 +0100 Subject: [PATCH 220/299] api: make Remote for SyncJob optional Signed-off-by: Hannes Laimer Reviewed-by: Lukas Wagner Tested-by: Lukas Wagner Tested-by: Tested-by: Gabriel Goller --- pbs-api-types/src/jobs.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 7b5a1f3d..b8640216 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -17,8 +17,8 @@ const_regex! { /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); - /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID' - pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:"); + /// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID' + pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:"); } pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") @@ -471,6 +471,7 @@ pub const TRANSFER_LAST_SCHEMA: Schema = }, remote: { schema: REMOTE_ID_SCHEMA, + optional: true, }, "remote-store": { schema: DATASTORE_SCHEMA, @@ -519,7 +520,9 @@ pub struct SyncJobConfig { pub ns: Option, #[serde(skip_serializing_if = "Option::is_none")] pub owner: Option, - pub remote: String, + #[serde(skip_serializing_if = "Option::is_none")] + /// None implies local sync. 
+ pub remote: Option, pub remote_store: String, #[serde(skip_serializing_if = "Option::is_none")] pub remote_ns: Option, From b01c0f572bde62c26b4b74302f325c935d2564be Mon Sep 17 00:00:00 2001 From: Maximiliano Sandoval R Date: Mon, 30 Oct 2023 13:27:48 +0100 Subject: [PATCH 221/299] fix-3211: manager: Document --notify argument Signed-off-by: Maximiliano Sandoval R --- pbs-api-types/src/jobs.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index b8640216..1f5b3cf1 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -151,12 +151,13 @@ pub struct DatastoreNotify { pub prune: Option, } -pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = - StringSchema::new("Datastore notification setting") - .format(&ApiStringFormat::PropertyString( - &DatastoreNotify::API_SCHEMA, - )) - .schema(); +pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new( + "Datastore notification setting, enum can be one of 'always', 'never', or 'error'.", +) +.format(&ApiStringFormat::PropertyString( + &DatastoreNotify::API_SCHEMA, +)) +.schema(); pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( "Do not verify backups that are already verified if their verification is not outdated.", From d10394fc6620fb6373cc803892571ba057e27edf Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Mon, 27 Nov 2023 15:37:35 +0100 Subject: [PATCH 222/299] tree-wide: run cargo fmt Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/metrics.rs | 1 - pbs-api-types/src/node.rs | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs index be5ff27a..99df88a7 100644 --- a/pbs-api-types/src/metrics.rs +++ b/pbs-api-types/src/metrics.rs @@ -147,7 +147,6 @@ pub struct InfluxDbHttp { pub comment: Option, } - #[api] #[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)] /// Type of the 
metric server diff --git a/pbs-api-types/src/node.rs b/pbs-api-types/src/node.rs index 704215bb..9033d586 100644 --- a/pbs-api-types/src/node.rs +++ b/pbs-api-types/src/node.rs @@ -1,9 +1,8 @@ -use serde::{Deserialize, Serialize}; use proxmox_schema::*; +use serde::{Deserialize, Serialize}; use crate::StorageStatus; - #[api] #[derive(Serialize, Deserialize, Default)] #[serde(rename_all = "kebab-case")] From 25d26d83b137bb4b72d3c1c53966ddc3f20a9ad7 Mon Sep 17 00:00:00 2001 From: Markus Frank Date: Tue, 28 Nov 2023 14:23:18 +0100 Subject: [PATCH 223/299] api types: add regex, format & schema for partition names The new regex is similar to BLOCKDEVICE_NAME_REGEX but also allows numbers at the end of the device name (also allows partitions names). For nvme partitions it also allows the letter p and a number. Signed-off-by: Markus Frank Reviewed-by: Lukas Wagner Tested-by: Lukas Wagner Reviewed-by: Max Carrara Tested-by: Max Carrara --- pbs-api-types/src/lib.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index ebd5550d..795ff2a6 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -191,6 +191,7 @@ const_regex! 
{ ); pub BLOCKDEVICE_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+)|(?:nvme\d+n\d+)$"; + pub BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+\d*)|(?:nvme\d+n\d+(p\d+)?)$"; pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$"); } @@ -205,6 +206,8 @@ pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_ pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX); +pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT: ApiStringFormat = + ApiStringFormat::Pattern(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX); pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = @@ -285,6 +288,13 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = .max_length(64) .schema(); +pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA: Schema = + StringSchema::new("(Partition) block device name (/sys/class/block/).") + .format(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT) + .min_length(3) + .max_length(64) + .schema(); + pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new("Disk name list.", &BLOCKDEVICE_NAME_SCHEMA).schema(); From 5d77ea0cd2fc449be3bc0943321c15a77c0eb1a6 Mon Sep 17 00:00:00 2001 From: Gabriel Goller Date: Wed, 29 Nov 2023 14:28:58 +0100 Subject: [PATCH 224/299] node: status: added bootmode Added field that shows the bootmode of the node. The bootmode is either Legacy Bios, EFI, or EFI (Secure Boot). To detect the mode we use the exact same method as in pve: We check if the `/sys/firmware/efi` folder exists, then check if the `SecureBoot-xx...` file in the `efivars` directory has the SecureBoot flag enabled. 
Signed-off-by: Gabriel Goller Tested-by: Lukas Wagner --- pbs-api-types/src/node.rs | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/node.rs b/pbs-api-types/src/node.rs index 9033d586..ab626157 100644 --- a/pbs-api-types/src/node.rs +++ b/pbs-api-types/src/node.rs @@ -38,6 +38,29 @@ pub struct NodeInformation { pub fingerprint: String, } + +#[api] +#[derive(Serialize, Deserialize, Copy, Clone)] +#[serde(rename_all = "kebab-case")] +/// The possible BootModes +pub enum BootMode { + /// The BootMode is EFI/UEFI + Efi, + /// The BootMode is Legacy BIOS + LegacyBios, +} + +#[api] +#[derive(Serialize, Deserialize, Clone)] +#[serde(rename_all = "lowercase")] +/// Holds the Bootmodes +pub struct BootModeInformation { + /// The BootMode, either Efi or Bios + pub mode: BootMode, + /// SecureBoot status + pub secureboot: bool, +} + #[api] #[derive(Serialize, Deserialize, Default)] #[serde(rename_all = "kebab-case")] @@ -77,7 +100,7 @@ pub struct NodeCpuInformation { } }, )] -#[derive(Serialize, Deserialize, Default)] +#[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// The Node status pub struct NodeStatus { @@ -96,4 +119,6 @@ pub struct NodeStatus { pub wait: f64, pub cpuinfo: NodeCpuInformation, pub info: NodeInformation, + /// Current boot mode + pub boot_info: BootModeInformation, } From 325dbbc97f53e1ce459ff7ac50c5b827bad2114c Mon Sep 17 00:00:00 2001 From: Gabriel Goller Date: Wed, 29 Nov 2023 14:29:00 +0100 Subject: [PATCH 225/299] node: status: declutter kernel-version Return a struct with all the components of the kernel version like it has been done in pve. Also return the legacy `kversion` to keep backwards compat. 
Signed-off-by: Gabriel Goller Tested-by: Lukas Wagner --- pbs-api-types/src/node.rs | 40 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/node.rs b/pbs-api-types/src/node.rs index ab626157..c4e9a179 100644 --- a/pbs-api-types/src/node.rs +++ b/pbs-api-types/src/node.rs @@ -1,3 +1,5 @@ +use std::ffi::OsStr; + use proxmox_schema::*; use serde::{Deserialize, Serialize}; @@ -38,6 +40,40 @@ pub struct NodeInformation { pub fingerprint: String, } +#[api] +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "lowercase")] +/// The current kernel version (output of `uname`) +pub struct KernelVersionInformation { + /// The systemname/nodename + pub sysname: String, + /// The kernel release number + pub release: String, + /// The kernel version + pub version: String, + /// The machine architecture + pub machine: String, +} + +impl KernelVersionInformation { + pub fn from_uname_parts( + sysname: &OsStr, + release: &OsStr, + version: &OsStr, + machine: &OsStr, + ) -> Self { + KernelVersionInformation { + sysname: sysname.to_str().map(String::from).unwrap_or_default(), + release: release.to_str().map(String::from).unwrap_or_default(), + version: version.to_str().map(String::from).unwrap_or_default(), + machine: machine.to_str().map(String::from).unwrap_or_default(), + } + } + + pub fn get_legacy(&self) -> String { + format!("{} {} {}", self.sysname, self.release, self.version) + } +} #[api] #[derive(Serialize, Deserialize, Copy, Clone)] @@ -111,7 +147,9 @@ pub struct NodeStatus { pub uptime: u64, /// Load for 1, 5 and 15 minutes. pub loadavg: [f64; 3], - /// The current kernel version. + /// The current kernel version (NEW struct type). + pub current_kernel: KernelVersionInformation, + /// The current kernel version (LEGACY string type). pub kversion: String, /// Total CPU usage since last query. 
pub cpu: f64, From ea95d57759f25799a5c5e63265d1aa07123c5dbf Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Wed, 29 Nov 2023 18:32:06 +0100 Subject: [PATCH 226/299] tree-wide: fix various typos found with codespell Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 2 +- pbs-api-types/src/metrics.rs | 2 +- pbs-api-types/src/traffic_control.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 1f619c9d..d4ead1d1 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -195,7 +195,7 @@ pub enum DatastoreFSyncLevel { /// while reducing the impact on many file systems in contrast to the file level sync. /// Depending on the setup, it might have a negative impact on unrelated write operations /// of the underlying filesystem, but it is generally a good compromise between performance - /// and consitency. + /// and consistency. #[default] Filesystem, } diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs index 99df88a7..6800c23b 100644 --- a/pbs-api-types/src/metrics.rs +++ b/pbs-api-types/src/metrics.rs @@ -175,7 +175,7 @@ pub enum MetricServerType { )] #[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] #[serde(rename_all = "kebab-case")] -/// Basic information about a metric server thats available for all types +/// Basic information about a metric server that's available for all types pub struct MetricServerInfo { pub name: String, #[serde(rename = "type")] diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index c97d9ef8..fb264531 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -8,7 +8,7 @@ use crate::{ }; pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema = - StringSchema::new("Timeframe to specify when the rule is actice.") + StringSchema::new("Timeframe to specify when the rule is active.") 
.format(&DAILY_DURATION_FORMAT) .schema(); From dc53be1b9acf239167e7e7ab8dbd1d7ec91bf646 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 6 Dec 2023 15:53:57 +0100 Subject: [PATCH 227/299] api-types: add a missing serde(default) Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/remote.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs index e7912ee0..0d5c9701 100644 --- a/pbs-api-types/src/remote.rs +++ b/pbs-api-types/src/remote.rs @@ -79,7 +79,7 @@ pub struct RemoteConfig { pub struct Remote { pub name: String, // Note: The stored password is base64 encoded - #[serde(skip_serializing_if = "String::is_empty")] + #[serde(default, skip_serializing_if = "String::is_empty")] #[serde(with = "proxmox_serde::string_as_base64")] pub password: String, #[serde(flatten)] From 4abc2ec487017d50bbd3848068dfbfd2d481b045 Mon Sep 17 00:00:00 2001 From: Gabriel Goller Date: Mon, 11 Dec 2023 09:59:02 +0100 Subject: [PATCH 228/299] status: use Option on avail/used datastore attrs Instead of returning -1 if we can't get the attributes, we use an Option which will not be serialized on `None`. Signed-off-by: Gabriel Goller --- pbs-api-types/src/datastore.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index d4ead1d1..74f610d1 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1302,12 +1302,15 @@ pub struct DataStoreStatus { /// Status of a Datastore pub struct DataStoreStatusListItem { pub store: String, - /// The Size of the underlying storage in bytes. (-1 on error) - pub total: i64, - /// The used bytes of the underlying storage. (-1 on error) - pub used: i64, + /// The Size of the underlying storage in bytes. + #[serde(skip_serializing_if = "Option::is_none")] + pub total: Option, + /// The used bytes of the underlying storage. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub used: Option, /// The available bytes of the underlying storage. (-1 on error) - pub avail: i64, + #[serde(skip_serializing_if = "Option::is_none")] + pub avail: Option, /// A list of usages of the past (last Month). #[serde(skip_serializing_if = "Option::is_none")] pub history: Option>>, @@ -1335,9 +1338,9 @@ impl DataStoreStatusListItem { pub fn empty(store: &str, err: Option) -> Self { DataStoreStatusListItem { store: store.to_owned(), - total: -1, - used: -1, - avail: -1, + total: None, + used: None, + avail: None, history: None, history_start: None, history_delta: None, From e9283e93e7aad0e0b8ecc915d708db22566432fe Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 12 Dec 2023 12:32:46 +0100 Subject: [PATCH 229/299] tape: derive PartialEq and PartialOrd for TapeDensity so that we can compare more easily Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/drive.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index ea2cbbd8..0217704b 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -108,7 +108,7 @@ pub struct MamAttribute { } #[api()] -#[derive(Serialize, Deserialize, Copy, Clone, Debug)] +#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)] pub enum TapeDensity { /// Unknown (no media loaded) Unknown, From a82bcf8ad1c781d72230849df3789c0e9adb3fb5 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 7 Dec 2023 13:51:01 +0100 Subject: [PATCH 230/299] tape: changer: save whole LtoTapeDrive config in MtxMediaChanger we'll need more info from there in the future, so derive clone for it and save the whole config instead of adding an additional field. 
Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/drive.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index 0217704b..626c5d9c 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -59,7 +59,7 @@ pub struct VirtualTapeDrive { }, } )] -#[derive(Serialize, Deserialize, Updater)] +#[derive(Serialize, Deserialize, Updater, Clone)] #[serde(rename_all = "kebab-case")] /// Lto SCSI tape driver pub struct LtoTapeDrive { From 99f24b2079bd42de1b603ab04bf600225912241d Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 7 Dec 2023 13:51:02 +0100 Subject: [PATCH 231/299] fix #4904: tape changer: add option to eject before unload some tape libraries need the tape being ejected from the drive before doing an unload. Since we cannot easily detect if that's the case, introduce an 'eject_before_unload' option. Instead of just adding a bool flag to the config, add a new 'options' property string where we can put such niche options similar to how we handle the datastore tuning options. Extend the LtoTapeHandle with 'medium_present' which just uses a TEST UNIT READY command to check for present medium, so we don't try to eject an already ejected tape. Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/changer.rs | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/tape/changer.rs b/pbs-api-types/src/tape/changer.rs index c9c7fcaa..e3cf27c1 100644 --- a/pbs-api-types/src/tape/changer.rs +++ b/pbs-api-types/src/tape/changer.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{ - api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater, + api, ApiStringFormat, ApiType, ArraySchema, IntegerSchema, Schema, StringSchema, Updater, }; use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT}; @@ -39,6 +39,26 @@ Import/Export, i.e. 
any media in those slots are considered to be .format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA)) .schema(); +fn is_false(b: &bool) -> bool { + !b +} + +#[api] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Options for Changers +pub struct ChangerOptions { + #[serde(default, skip_serializing_if = "is_false")] + /// if set to true, tapes are ejected manually before unloading + pub eject_before_unload: bool, +} + +pub const CHANGER_OPTIONS_STRING_SCHEMA: Schema = StringSchema::new("Changer options") + .format(&ApiStringFormat::PropertyString( + &ChangerOptions::API_SCHEMA, + )) + .schema(); + #[api( properties: { name: { @@ -51,6 +71,10 @@ Import/Export, i.e. any media in those slots are considered to be schema: EXPORT_SLOT_LIST_SCHEMA, optional: true, }, + options: { + optional: true, + schema: CHANGER_OPTIONS_STRING_SCHEMA, + }, }, )] #[derive(Serialize, Deserialize, Updater)] @@ -62,6 +86,8 @@ pub struct ScsiTapeChanger { pub path: String, #[serde(skip_serializing_if = "Option::is_none")] pub export_slots: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub options: Option, } #[api( From 2cfa31a218b2182ae347323b37216952d521efcf Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Wed, 13 Dec 2023 11:11:12 +0100 Subject: [PATCH 232/299] tape: fix 'eject-before-unload' api type by converting the bool into an option, otherwise having the options not set at all will fail the unload while deserializing with 'eject-before-unload is not optional' Also if we can automatically decide this in the future, we can now detect if the option was explicitely set or not. 
Fixes: 99f24b20 ("fix #4904: tape changer: add option to eject before unload") Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/changer.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pbs-api-types/src/tape/changer.rs b/pbs-api-types/src/tape/changer.rs index e3cf27c1..9e36b12e 100644 --- a/pbs-api-types/src/tape/changer.rs +++ b/pbs-api-types/src/tape/changer.rs @@ -39,18 +39,21 @@ Import/Export, i.e. any media in those slots are considered to be .format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA)) .schema(); -fn is_false(b: &bool) -> bool { - !b -} - -#[api] +#[api( + properties: { + "eject-before-unload": { + optional: true, + default: false, + }, + }, +)] #[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] /// Options for Changers pub struct ChangerOptions { - #[serde(default, skip_serializing_if = "is_false")] + #[serde(skip_serializing_if = "Option::is_none")] /// if set to true, tapes are ejected manually before unloading - pub eject_before_unload: bool, + pub eject_before_unload: Option, } pub const CHANGER_OPTIONS_STRING_SCHEMA: Schema = StringSchema::new("Changer options") From 904bef02312afd9d633cda5149e37a717cf89887 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Thu, 14 Dec 2023 10:05:19 +0100 Subject: [PATCH 233/299] tape: move 'eject-before-unload' to a plain changer config option instead of having it in a property string. For now this should be fine, and if we need many more such options, we can still move them into a property string if we want. Also update the cli command in the docs on how to set it now. 
Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/changer.rs | 34 ++++++------------------------- 1 file changed, 6 insertions(+), 28 deletions(-) diff --git a/pbs-api-types/src/tape/changer.rs b/pbs-api-types/src/tape/changer.rs index 9e36b12e..df3823cf 100644 --- a/pbs-api-types/src/tape/changer.rs +++ b/pbs-api-types/src/tape/changer.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{ - api, ApiStringFormat, ApiType, ArraySchema, IntegerSchema, Schema, StringSchema, Updater, + api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater, }; use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT}; @@ -39,29 +39,6 @@ Import/Export, i.e. any media in those slots are considered to be .format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA)) .schema(); -#[api( - properties: { - "eject-before-unload": { - optional: true, - default: false, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Options for Changers -pub struct ChangerOptions { - #[serde(skip_serializing_if = "Option::is_none")] - /// if set to true, tapes are ejected manually before unloading - pub eject_before_unload: Option, -} - -pub const CHANGER_OPTIONS_STRING_SCHEMA: Schema = StringSchema::new("Changer options") - .format(&ApiStringFormat::PropertyString( - &ChangerOptions::API_SCHEMA, - )) - .schema(); - #[api( properties: { name: { @@ -74,10 +51,10 @@ pub const CHANGER_OPTIONS_STRING_SCHEMA: Schema = StringSchema::new("Changer opt schema: EXPORT_SLOT_LIST_SCHEMA, optional: true, }, - options: { + "eject-before-unload": { optional: true, - schema: CHANGER_OPTIONS_STRING_SCHEMA, - }, + default: false, + } }, )] #[derive(Serialize, Deserialize, Updater)] @@ -90,7 +67,8 @@ pub struct ScsiTapeChanger { #[serde(skip_serializing_if = "Option::is_none")] pub export_slots: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub options: Option, + /// if set to true, tapes are ejected manually 
before unloading + pub eject_before_unload: Option, } #[api( From c7b80d5e04e8202102e7c120b78bf2063548259f Mon Sep 17 00:00:00 2001 From: Philipp Hufnagl Date: Tue, 2 Jan 2024 12:06:52 +0100 Subject: [PATCH 234/299] fix #4315: jobs: modify GroupFilter so include/exclude is tracked After some discussion I changed the include/exclude behavior to first run all include filters and after that all exclude filters (rather than allowing to alternate in between). This is done by splitting them into 2 lists, running include first. A lot of discussion happened about how edge cases should be handled and we came to the following conclusion: no include filter + no exclude filter => include all some include filter + no exclude filter => filter as always no include filter + some exclude filter => include all then exclude Since a GroupFilter now also features a behavior, the struct has been renamed to GroupType (since simply type is a keyword). The new GroupFilter now has a behaviour as a flag 'is_exclude'. I considered calling it 'is_include' but a reader later then might not know what the opposite of 'include' is (do not include? deactivate?). I also considered making a new enum 'behaviour' but since there are only 2 values I considered it over-engineered.
Signed-off-by: Philipp Hufnagl --- pbs-api-types/src/datastore.rs | 36 ++++++++++++++++++------ pbs-api-types/src/jobs.rs | 51 +++++++++++++++++++++++++--------- 2 files changed, 65 insertions(+), 22 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 74f610d1..cce9888b 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -10,9 +10,9 @@ use proxmox_schema::{ }; use crate::{ - Authid, CryptMode, Fingerprint, MaintenanceMode, Userid, DATASTORE_NOTIFY_STRING_SCHEMA, - GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, - SINGLE_LINE_COMMENT_SCHEMA, UPID, + Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, + DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT, + PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, UPID, }; const_regex! { @@ -843,19 +843,37 @@ impl BackupGroup { } pub fn matches(&self, filter: &crate::GroupFilter) -> bool { - use crate::GroupFilter; - - match filter { - GroupFilter::Group(backup_group) => { + use crate::FilterType; + match &filter.filter_type { + FilterType::Group(backup_group) => { match backup_group.parse::() { Ok(group) => *self == group, Err(_) => false, // shouldn't happen if value is schema-checked } } - GroupFilter::BackupType(ty) => self.ty == *ty, - GroupFilter::Regex(regex) => regex.is_match(&self.to_string()), + FilterType::BackupType(ty) => self.ty == *ty, + FilterType::Regex(regex) => regex.is_match(&self.to_string()), } } + + pub fn apply_filters(&self, filters: &[GroupFilter]) -> bool { + // since there will only be view filter in the list, an extra iteration to get the umber of + // include filter should not be an issue + let is_included = if filters.iter().filter(|f| !f.is_exclude).count() == 0 { + true + } else { + filters + .iter() + .filter(|f| !f.is_exclude) + .any(|filter| self.matches(filter)) + }; + + is_included + && !filters + .iter() + 
.filter(|f| f.is_exclude) + .any(|filter| self.matches(filter)) + } } impl AsRef for BackupGroup { diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 1f5b3cf1..607451ff 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -388,7 +388,7 @@ pub struct TapeBackupJobStatus { #[derive(Clone, Debug)] /// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`. -pub enum GroupFilter { +pub enum FilterType { /// BackupGroup type - either `vm`, `ct`, or `host`. BackupType(BackupType), /// Full identifier of BackupGroup, including type @@ -397,7 +397,7 @@ pub enum GroupFilter { Regex(Regex), } -impl PartialEq for GroupFilter { +impl PartialEq for FilterType { fn eq(&self, other: &Self) -> bool { match (self, other) { (Self::BackupType(a), Self::BackupType(b)) => a == b, @@ -408,27 +408,52 @@ impl PartialEq for GroupFilter { } } +#[derive(Clone, Debug)] +pub struct GroupFilter { + pub is_exclude: bool, + pub filter_type: FilterType, +} + +impl PartialEq for GroupFilter { + fn eq(&self, other: &Self) -> bool { + self.filter_type == other.filter_type && self.is_exclude == other.is_exclude + } +} + +impl Eq for GroupFilter {} + impl std::str::FromStr for GroupFilter { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - match s.split_once(':') { - Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())), - Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)), - Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)), + let (is_exclude, type_str) = match s.split_once(':') { + Some(("include", value)) => (false, value), + Some(("exclude", value)) => (true, value), + _ => (false, s), + }; + + let filter_type = match type_str.split_once(':') { + Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string())), + Some(("type", value)) => 
Ok(FilterType::BackupType(value.parse()?)), + Some(("regex", value)) => Ok(FilterType::Regex(Regex::new(value)?)), Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)), None => Err(format_err!("input doesn't match expected format '|regex:REGEX>'")), - }.map_err(|err| format_err!("'{}' - {}", s, err)) + }?; + Ok(GroupFilter { + is_exclude, + filter_type, + }) } } // used for serializing below, caution! impl std::fmt::Display for GroupFilter { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type), - GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group), - GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()), + let exclude = if self.is_exclude { "exclude:" } else { "" }; + match &self.filter_type { + FilterType::BackupType(backup_type) => write!(f, "{}type:{}", exclude, backup_type), + FilterType::Group(backup_group) => write!(f, "{}group:{}", exclude, backup_group), + FilterType::Regex(regex) => write!(f, "{}regex:{}", exclude, regex.as_str()), } } } @@ -441,9 +466,9 @@ fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> { } pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new( - "Group filter based on group identifier ('group:GROUP'), group type ('type:'), or regex ('regex:RE').") + "Group filter based on group identifier ('group:GROUP'), group type ('type:'), or regex ('regex:RE'). 
Can be inverted by adding 'exclude:' before.") .format(&ApiStringFormat::VerifyFn(verify_group_filter)) - .type_text("|group:GROUP|regex:RE>") + .type_text("[]|group:GROUP|regex:RE>") .schema(); pub const GROUP_FILTER_LIST_SCHEMA: Schema = From 601098729a5792285463ac302508ebd4ecf861a3 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 4 Jan 2024 10:17:02 +0100 Subject: [PATCH 235/299] fixup import grouping Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/jobs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 607451ff..5b595a67 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -1,6 +1,6 @@ -use anyhow::format_err; use std::str::FromStr; +use anyhow::format_err; use regex::Regex; use serde::{Deserialize, Serialize}; From e98fb9d5b181e655a2f9a09dca2f05c0c037b59c Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 4 Jan 2024 10:20:16 +0100 Subject: [PATCH 236/299] api-types: factor out FilterType parsing simply keep the previous FromStr implementation and call it the new GroupFilter impl Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/jobs.rs | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 5b595a67..bc5d749b 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use anyhow::format_err; +use anyhow::bail; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -408,6 +408,20 @@ impl PartialEq for FilterType { } } +impl std::str::FromStr for FilterType { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(match s.split_once(':') { + Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string()))?, + Some(("type", value)) => FilterType::BackupType(value.parse()?), + Some(("regex", value)) => 
FilterType::Regex(Regex::new(value)?), + Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty), + None => bail!("input doesn't match expected format '|regex:REGEX>'"), + }) + } +} + #[derive(Clone, Debug)] pub struct GroupFilter { pub is_exclude: bool, @@ -432,16 +446,9 @@ impl std::str::FromStr for GroupFilter { _ => (false, s), }; - let filter_type = match type_str.split_once(':') { - Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string())), - Some(("type", value)) => Ok(FilterType::BackupType(value.parse()?)), - Some(("regex", value)) => Ok(FilterType::Regex(Regex::new(value)?)), - Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)), - None => Err(format_err!("input doesn't match expected format '|regex:REGEX>'")), - }?; Ok(GroupFilter { is_exclude, - filter_type, + filter_type: type_str.parse()?, }) } } From 01618ea991610d3d59116558567f387b9dbfe0ac Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 4 Jan 2024 11:05:51 +0100 Subject: [PATCH 237/299] api-types: impl Display for FilterType as the previous commit: simply keep the previous Display impl and call it from out of the new GroupFilter impl Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/jobs.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index bc5d749b..798dea0f 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -422,6 +422,17 @@ impl std::str::FromStr for FilterType { } } +// used for serializing below, caution! 
+impl std::fmt::Display for FilterType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type), + FilterType::Group(backup_group) => write!(f, "group:{}", backup_group), + FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()), + } + } +} + #[derive(Clone, Debug)] pub struct GroupFilter { pub is_exclude: bool, @@ -456,12 +467,10 @@ impl std::str::FromStr for GroupFilter { // used for serializing below, caution! impl std::fmt::Display for GroupFilter { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let exclude = if self.is_exclude { "exclude:" } else { "" }; - match &self.filter_type { - FilterType::BackupType(backup_type) => write!(f, "{}type:{}", exclude, backup_type), - FilterType::Group(backup_group) => write!(f, "{}group:{}", exclude, backup_group), - FilterType::Regex(regex) => write!(f, "{}regex:{}", exclude, regex.as_str()), + if self.is_exclude { + f.write_str("exclude:")?; } + std::fmt::Display::fmt(&self.filter_type, f) } } From c5714ff06f67d0271fe1e725e9d78b18a5edbfc2 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 4 Jan 2024 11:06:01 +0100 Subject: [PATCH 238/299] api-types: doc improvements Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/jobs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 798dea0f..80578d80 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -482,7 +482,7 @@ fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> { } pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new( - "Group filter based on group identifier ('group:GROUP'), group type ('type:'), or regex ('regex:RE'). Can be inverted by adding 'exclude:' before.") + "Group filter based on group identifier ('group:GROUP'), group type ('type:'), or regex ('regex:RE'). 
Can be inverted by prepending 'exclude:'.") .format(&ApiStringFormat::VerifyFn(verify_group_filter)) .type_text("[]|group:GROUP|regex:RE>") .schema(); From c2545b65405633df5ce45f8c06d1ac6cc42d0c6c Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 4 Jan 2024 12:10:12 +0100 Subject: [PATCH 239/299] move api-types tests to api-types and drop vec![] macro we don't need to allocate here Signed-off-by: Wolfgang Bumiller --- pbs-api-types/tests/group_filter_tests.rs | 76 +++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 pbs-api-types/tests/group_filter_tests.rs diff --git a/pbs-api-types/tests/group_filter_tests.rs b/pbs-api-types/tests/group_filter_tests.rs new file mode 100644 index 00000000..89a7ddd1 --- /dev/null +++ b/pbs-api-types/tests/group_filter_tests.rs @@ -0,0 +1,76 @@ +use pbs_api_types::{BackupGroup, BackupType, GroupFilter}; +use std::str::FromStr; + +#[test] +fn test_no_filters() { + let group_filters = vec![]; + + let do_backup = [ + "vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109", + ]; + + for id in do_backup { + assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters)); + } +} + +#[test] +fn test_include_filters() { + let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()]; + + let do_backup = [ + "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", + ]; + + let dont_backup = ["vm/101", "vm/109"]; + + for id in do_backup { + assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters)); + } + + for id in dont_backup { + assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters)); + } +} + +#[test] +fn test_exclude_filters() { + let group_filters = [ + GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(), + GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(), + ]; + + let do_backup = ["vm/104", "vm/108", "vm/109"]; + + let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", 
"vm/106", "vm/107"]; + + for id in do_backup { + assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters)); + } + for id in dont_backup { + assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters)); + } +} + +#[test] +fn test_include_and_exclude_filters() { + let group_filters = [ + GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(), + GroupFilter::from_str("regex:.*10[2-8]").unwrap(), + GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(), + ]; + + let do_backup = ["vm/104", "vm/108"]; + + let dont_backup = [ + "vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109", + ]; + + for id in do_backup { + assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters)); + } + + for id in dont_backup { + assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters)); + } +} From 8fabade99deef15c502b1a987e646a055fd8b625 Mon Sep 17 00:00:00 2001 From: Gabriel Goller Date: Tue, 6 Feb 2024 11:09:07 +0100 Subject: [PATCH 240/299] fix #5190: api: OIDC: accept generic URIs for the ACR value Allow more complex strings for the acr-value when using openid. The openid documentation only specifies the acr-value *should* be an URI [0]. Implemented a regex that loosely disallows some of the reserved URI characters specified in the RFC [1]. Currently values like: - "urn:mace:incommon:iap:silver" - "urn:comsolve.nl:idp:contract:rba:location" do NOT work, although they are correct URI's and common acr tokens. For Proxmox VE we had to actually make this more strict to align with each other, as there we accepted any string. 
[0]: https://openid.net/specs/openid-connect-core-1_0.html [1]: https://www.rfc-editor.org/rfc/rfc2396.txt Signed-off-by: Gabriel Goller --- pbs-api-types/src/lib.rs | 5 +++++ pbs-api-types/src/openid.rs | 7 ++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 795ff2a6..88e8f44d 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -178,6 +178,11 @@ const_regex! { /// any identifier command line tools work with. pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); + /// Regex that (loosely) matches URIs according to [RFC 2396](https://www.rfc-editor.org/rfc/rfc2396.txt) + /// This does not completely match a URI, but rather disallows all the prohibited characters + /// specified in the RFC. + pub GENERIC_URI_REGEX = r#"^[^\x00-\x1F\x7F <>#"]*$"#; + pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$"; pub MULTI_LINE_COMMENT_REGEX = r"(?m)^([[:^cntrl:]]*)$"; diff --git a/pbs-api-types/src/openid.rs b/pbs-api-types/src/openid.rs index 2c7646a3..2c95c5c6 100644 --- a/pbs-api-types/src/openid.rs +++ b/pbs-api-types/src/openid.rs @@ -3,7 +3,8 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater}; use super::{ - PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, + GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA, + SINGLE_LINE_COMMENT_SCHEMA, }; pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); @@ -24,11 +25,11 @@ pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope Lis .default(OPENID_DEFAILT_SCOPE_LIST) .schema(); -pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); +pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX); pub const OPENID_ACR_SCHEMA: Schema 
= StringSchema::new("OpenID Authentication Context Class Reference.") - .format(&OPENID_SCOPE_FORMAT) + .format(&OPENID_ACR_FORMAT) .schema(); pub const OPENID_ACR_ARRAY_SCHEMA: Schema = From b8f2582bd9f3663fdaf871ac40fe05ab883ed3ce Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 20 Mar 2024 11:03:55 +0100 Subject: [PATCH 241/299] pbs-api-types: use const_format and new api-types from proxmox-schema Signed-off-by: Dietmar Maurer --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/acl.rs | 5 +- pbs-api-types/src/common_regex.rs | 78 --------- pbs-api-types/src/datastore.rs | 29 ++-- pbs-api-types/src/jobs.rs | 11 +- pbs-api-types/src/lib.rs | 280 ++++++++---------------------- pbs-api-types/src/tape/mod.rs | 8 +- 7 files changed, 101 insertions(+), 311 deletions(-) delete mode 100644 pbs-api-types/src/common_regex.rs diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 31b69f62..94ab583b 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -7,6 +7,7 @@ description = "general API type helpers for PBS" [dependencies] anyhow.workspace = true +const_format.workspace = true hex.workspace = true lazy_static.workspace = true percent-encoding.workspace = true diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index 8bbd2958..b2583b6a 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -2,14 +2,17 @@ use std::str::FromStr; use serde::de::{value, IntoDeserializer}; use serde::{Deserialize, Serialize}; +use const_format::concatcp; use proxmox_lang::constnamedbitmap; use proxmox_schema::{ api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema, }; +use crate::PROXMOX_SAFE_ID_REGEX_STR; + const_regex! 
{ - pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$"); + pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$"); } // define Privilege bitfield diff --git a/pbs-api-types/src/common_regex.rs b/pbs-api-types/src/common_regex.rs deleted file mode 100644 index 8fe30673..00000000 --- a/pbs-api-types/src/common_regex.rs +++ /dev/null @@ -1,78 +0,0 @@ -//! Predefined Regular Expressions -//! -//! This is a collection of useful regular expressions - -use lazy_static::lazy_static; -use regex::Regex; - -#[rustfmt::skip] -#[macro_export] -macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") } -#[rustfmt::skip] -#[macro_export] -macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") } -#[rustfmt::skip] -#[macro_export] -macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) } - -/// Returns the regular expression string to match IPv4 addresses -#[rustfmt::skip] -#[macro_export] -macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) } - -/// Returns the regular expression string to match IPv6 addresses -#[rustfmt::skip] -#[macro_export] -macro_rules! 
IPV6RE { () => (concat!(r"(?:", - r"(?:(?:", r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|", - r"(?:(?:", r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|", - r"(?:(?:(?:", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|", - r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|", - r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|", - r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|", - r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::", ")", IPV6LS32!(), r")|", - r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::", ")", IPV6H16!(), r")|", - r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::", ")))")) -} - -/// Returns the regular expression string to match IP addresses (v4 or v6) -#[rustfmt::skip] -#[macro_export] -macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) } - -/// Regular expression string to match IP addresses where IPv6 addresses require brackets around -/// them, while for IPv4 they are forbidden. -#[rustfmt::skip] -#[macro_export] -macro_rules! IPRE_BRACKET { () => ( - concat!(r"(?:", - IPV4RE!(), - r"|\[(?:", - IPV6RE!(), - r")\]", - r")")) -} - -lazy_static! 
{ - pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap(); - pub static ref IP_BRACKET_REGEX: Regex = - Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap(); - pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap(); - pub static ref SYSTEMD_DATETIME_REGEX: Regex = - Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap(); -} - -#[test] -fn test_regexes() { - assert!(IP_REGEX.is_match("127.0.0.1")); - assert!(IP_REGEX.is_match("::1")); - assert!(IP_REGEX.is_match("2014:b3a::27")); - assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1")); - assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF")); - - assert!(IP_BRACKET_REGEX.is_match("127.0.0.1")); - assert!(IP_BRACKET_REGEX.is_match("[::1]")); - assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]")); - assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]")); - assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]")); -} diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index cce9888b..5e13c157 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -2,6 +2,7 @@ use std::fmt; use std::path::PathBuf; use anyhow::{bail, format_err, Error}; +use const_format::concatcp; use serde::{Deserialize, Serialize}; use proxmox_schema::{ @@ -10,31 +11,33 @@ use proxmox_schema::{ }; use crate::{ - Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, - DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT, - PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, UPID, + Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, BACKUP_ID_RE, + BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA, + GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT, + PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, + 
SNAPSHOT_PATH_REGEX_STR, UPID, }; const_regex! { - pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$"); + pub BACKUP_NAMESPACE_REGEX = concatcp!(r"^", BACKUP_NS_RE, r"$"); - pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); + pub BACKUP_TYPE_REGEX = concatcp!(r"^(", BACKUP_TYPE_RE, r")$"); - pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); + pub BACKUP_ID_REGEX = concatcp!(r"^", BACKUP_ID_RE, r"$"); - pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); + pub BACKUP_DATE_REGEX = concatcp!(r"^", BACKUP_TIME_RE ,r"$"); - pub GROUP_PATH_REGEX = concat!( - r"^(", BACKUP_TYPE_RE!(), ")/", - r"(", BACKUP_ID_RE!(), r")$", + pub GROUP_PATH_REGEX = concatcp!( + r"^(", BACKUP_TYPE_RE, ")/", + r"(", BACKUP_ID_RE, r")$", ); pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$"; - pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); - pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$"); + pub SNAPSHOT_PATH_REGEX = concatcp!(r"^", SNAPSHOT_PATH_REGEX_STR, r"$"); + pub GROUP_OR_SNAPSHOT_PATH_REGEX = concatcp!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR, r"$"); - pub DATASTORE_MAP_REGEX = concat!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); + pub DATASTORE_MAP_REGEX = concatcp!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR, r"=)?", PROXMOX_SAFE_ID_REGEX_STR, r"$"); } pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 80578d80..6fb9b187 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -1,6 +1,7 @@ use std::str::FromStr; use anyhow::bail; +use const_format::concatcp; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -8,17 +9,17 @@ use proxmox_schema::*; use crate::{ Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, - BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, 
DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, - NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, - SINGLE_LINE_COMMENT_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, + MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, + PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; const_regex! { /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' - pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); + pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):"); /// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID' - pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:"); + pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:"); } pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 88e8f44d..7eb836ed 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -1,10 +1,8 @@ //! Basic API types used by most of the PBS code. +use const_format::concatcp; use serde::{Deserialize, Serialize}; -use proxmox_auth_api::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR}; - -pub mod common_regex; pub mod percent_encoding; use proxmox_schema::{ @@ -12,59 +10,78 @@ use proxmox_schema::{ }; use proxmox_time::parse_daily_duration; -#[rustfmt::skip] -#[macro_export] -macro_rules! 
PROXMOX_SAFE_ID_REGEX_STR { () => { r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" }; } +use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR}; + +pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT; +pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX; +pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR; +pub use proxmox_schema::api_types::{ + BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX, +}; +pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX}; +pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX}; +pub use proxmox_schema::api_types::{ + GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX, +}; +pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX}; +pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX}; + +pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX}; +pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX}; +pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX}; +pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX}; +pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX}; +pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX}; + +pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA; +pub use proxmox_schema::api_types::HOSTNAME_SCHEMA; +pub use proxmox_schema::api_types::HOST_PORT_SCHEMA; +pub use proxmox_schema::api_types::HTTP_URL_SCHEMA; +pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA; +pub use proxmox_schema::api_types::NODE_SCHEMA; +pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT; +pub use proxmox_schema::api_types::{ + BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA, +}; +pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, 
FINGERPRINT_SHA256_FORMAT}; +pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA}; +pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA}; +pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA}; +pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT}; +pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA}; + +use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR}; #[rustfmt::skip] -#[macro_export] -macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") } +pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*"; #[rustfmt::skip] -#[macro_export] -macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") } +pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)"; #[rustfmt::skip] -#[macro_export] -macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") } +pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z"; #[rustfmt::skip] -#[macro_export] -macro_rules! BACKUP_NS_RE { - () => ( - concat!("(?:", - "(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!(), - ")?") +pub const BACKUP_NS_RE: &str = + concatcp!("(?:", + "(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR, + ")?"); + +#[rustfmt::skip] +pub const BACKUP_NS_PATH_RE: &str = + concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/"); + +#[rustfmt::skip] +pub const SNAPSHOT_PATH_REGEX_STR: &str = + concatcp!( + r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")", ); -} #[rustfmt::skip] -#[macro_export] -macro_rules! 
BACKUP_NS_PATH_RE { - () => ( - concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/") +pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str = + concatcp!( + r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?", ); -} - -#[rustfmt::skip] -#[macro_export] -macro_rules! SNAPSHOT_PATH_REGEX_STR { - () => ( - concat!( - r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")", - ) - ); -} - -#[rustfmt::skip] -#[macro_export] -macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR { - () => { - concat!( - r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?", - ) - }; -} mod acl; pub use acl::*; @@ -128,102 +145,28 @@ pub use zfs::*; mod metrics; pub use metrics::*; -#[rustfmt::skip] -#[macro_use] -mod local_macros { - macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") } - macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) } - macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) } - macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) } - macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") } - macro_rules! DNS_ALIAS_NAME { - () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")")) - } - macro_rules! PORT_REGEX_STR { () => (r"(?:[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])") } -} - const_regex! 
{ - pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$"); - pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$"); - pub IP_REGEX = concat!(r"^", IPRE!(), r"$"); - pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$"); - pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$"); - pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$"); - pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$"; - pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$"); - pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$"); - pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$"); - pub HOST_PORT_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), "):", PORT_REGEX_STR!() ,"$"); - pub HTTP_URL_REGEX = concat!(r"^https?://(?:(?:(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), ")(?::", PORT_REGEX_STR!() ,")?)|", IPV6RE!(),")(?:/[^\x00-\x1F\x7F]*)?$"); - - pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ? - - pub PASSWORD_REGEX = r"^[[:^cntrl:]]*$"; // everything but control characters - - pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$"; - - pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ? - - pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$"; - // just a rough check - dummy acceptor is used before persisting pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$"; - /// Regex for safe identifiers. - /// - /// This - /// [article](https://dwheeler.com/essays/fixing-unix-linux-filenames.html) - /// contains further information why it is reasonable to restict - /// names this way. This is not only useful for filenames, but for - /// any identifier command line tools work with. 
- pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$"); - - /// Regex that (loosely) matches URIs according to [RFC 2396](https://www.rfc-editor.org/rfc/rfc2396.txt) - /// This does not completely match a URI, but rather disallows all the prohibited characters - /// specified in the RFC. - pub GENERIC_URI_REGEX = r#"^[^\x00-\x1F\x7F <>#"]*$"#; - - pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$"; - - pub MULTI_LINE_COMMENT_REGEX = r"(?m)^([[:^cntrl:]]*)$"; - - pub BACKUP_REPO_URL_REGEX = concat!( + pub BACKUP_REPO_URL_REGEX = concatcp!( r"^^(?:(?:(", - USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), + USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR, ")@)?(", - DNS_NAME!(), "|", IPRE_BRACKET!(), - "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$" + DNS_NAME_STR, "|", IPRE_BRACKET_STR, + "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$" ); - pub BLOCKDEVICE_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+)|(?:nvme\d+n\d+)$"; - pub BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+\d*)|(?:nvme\d+n\d+(p\d+)?)$"; - pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$"); + pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$"); } -pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX); -pub const IP_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V6_REGEX); -pub const IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_REGEX); -pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_REGEX); -pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX); -pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX); pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); -pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX); -pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX); 
-pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX); -pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX); + pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); -pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX); -pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX); + pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX); -pub const HOST_PORT_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOST_PORT_REGEX); -pub const HTTP_URL_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HTTP_URL_REGEX); - -pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX); pub const DAILY_DURATION_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop)); @@ -243,10 +186,6 @@ pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server .format(&IP_FORMAT) .schema(); -pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).") - .format(&HOSTNAME_FORMAT) - .schema(); - pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2") .format(&OPENSSL_CIPHERS_TLS_FORMAT) @@ -257,62 +196,6 @@ pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = .format(&OPENSSL_CIPHERS_TLS_FORMAT) .schema(); -pub const DNS_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_REGEX); - -pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX); - -pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.") - .format(&DNS_NAME_OR_IP_FORMAT) - .schema(); - -pub const HOST_PORT_SCHEMA: Schema = - 
StringSchema::new("host:port combination (Host can be DNS name or IP address).") - .format(&HOST_PORT_FORMAT) - .schema(); - -pub const HTTP_URL_SCHEMA: Schema = StringSchema::new("HTTP(s) url with optional port.") - .format(&HTTP_URL_FORMAT) - .schema(); - -pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')") - .format(&HOSTNAME_FORMAT) - .schema(); - -pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new( - "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.", -) -.format(&SINGLE_LINE_COMMENT_FORMAT) -.min_length(2) -.max_length(64) -.schema(); - -pub const BLOCKDEVICE_NAME_SCHEMA: Schema = - StringSchema::new("Block device name (/sys/block/).") - .format(&BLOCKDEVICE_NAME_FORMAT) - .min_length(3) - .max_length(64) - .schema(); - -pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA: Schema = - StringSchema::new("(Partition) block device name (/sys/class/block/).") - .format(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT) - .min_length(3) - .max_length(64) - .schema(); - -pub const DISK_ARRAY_SCHEMA: Schema = - ArraySchema::new("Disk name list.", &BLOCKDEVICE_NAME_SCHEMA).schema(); - -pub const DISK_LIST_SCHEMA: Schema = StringSchema::new("A list of disk names, comma separated.") - .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA)) - .schema(); - -pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.") - .format(&PASSWORD_FORMAT) - .min_length(1) - .max_length(1024) - .schema(); - pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.") .format(&PASSWORD_FORMAT) .min_length(5) @@ -325,31 +208,6 @@ pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.") .max_length(32) .schema(); -pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX); - -pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = - StringSchema::new("X509 certificate fingerprint (sha256).") - .format(&FINGERPRINT_SHA256_FORMAT) - .schema(); 
- -pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); - -pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX); - -pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).") - .format(&SINGLE_LINE_COMMENT_FORMAT) - .schema(); - -pub const MULTI_LINE_COMMENT_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&MULTI_LINE_COMMENT_REGEX); - -pub const MULTI_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (multiple lines).") - .format(&MULTI_LINE_COMMENT_FORMAT) - .schema(); - pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.") .format(&SUBSCRIPTION_KEY_FORMAT) @@ -357,8 +215,6 @@ pub const SUBSCRIPTION_KEY_SCHEMA: Schema = .max_length(16) .schema(); -pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.").max_length(256).schema(); - pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( "Prevent changes if current configuration file has different \ SHA256 digest. This can be used to prevent concurrent \ diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs index 99d7cb74..6a9d56bc 100644 --- a/pbs-api-types/src/tape/mod.rs +++ b/pbs-api-types/src/tape/mod.rs @@ -22,15 +22,19 @@ pub use media_location::*; mod media; pub use media::*; +use const_format::concatcp; use serde::{Deserialize, Serialize}; use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema}; use proxmox_uuid::Uuid; -use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT}; +use crate::{ + BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT, + PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR, +}; const_regex! 
{ - pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":(?:", BACKUP_NS_PATH_RE!(),")?", SNAPSHOT_PATH_REGEX_STR!(), r"$"); + pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE,")?", SNAPSHOT_PATH_REGEX_STR, r"$"); } pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat = From 158f98fe72296a33439d0f6a064c8a37e6e86e73 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 20 Mar 2024 11:13:13 +0100 Subject: [PATCH 242/299] cargo fmt (import reordering) Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/acl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index b2583b6a..ef639862 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -1,8 +1,8 @@ use std::str::FromStr; +use const_format::concatcp; use serde::de::{value, IntoDeserializer}; use serde::{Deserialize, Serialize}; -use const_format::concatcp; use proxmox_lang::constnamedbitmap; use proxmox_schema::{ From 71ff7c3344e65bed4e0cebdd6c5908ad88aafa57 Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Mon, 4 Mar 2024 14:26:18 +0100 Subject: [PATCH 243/299] datastore: remove datastore from internal cache based on maintenance mode We keep a DataStore cache, so ChunkStore's and lock files are kept by the proxy process and don't have to be reopened every time. However, for specific maintenance modes, e.g. 'offline', our process should not keep file in that datastore open. This clears the cache entry of a datastore if it is in a specific maintanance mode and the last task finished, which also drops any files still open by the process. 
Signed-off-by: Hannes Laimer Reviewed-by: Gabriel Goller Tested-by: Gabriel Goller --- pbs-api-types/src/maintenance.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 1b03ca94..a605cc17 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -77,6 +77,12 @@ pub struct MaintenanceMode { } impl MaintenanceMode { + /// Used for deciding whether the datastore is cleared from the internal cache after the last + /// task finishes, so all open files are closed. + pub fn is_offline(&self) -> bool { + self.ty == MaintenanceType::Offline + } + pub fn check(&self, operation: Option) -> Result<(), Error> { if self.ty == MaintenanceType::Delete { bail!("datastore is being deleted"); From 73bf2b1994556ccd422823ad61d1209e3b846e08 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 22 Apr 2024 10:31:28 +0200 Subject: [PATCH 244/299] pbs-api-types: use SchemaDeserializer for maintenance mode Signed-off-by: Dietmar Maurer Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 5e13c157..699b9e15 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -11,8 +11,8 @@ use proxmox_schema::{ }; use crate::{ - Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, BACKUP_ID_RE, - BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA, + Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, + BACKUP_ID_RE, BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, SNAPSHOT_PATH_REGEX_STR, UPID, @@ -336,10 +336,13 @@ impl DataStoreConfig { } pub 
fn get_maintenance_mode(&self) -> Option { - self.maintenance_mode - .as_ref() - .and_then(|str| MaintenanceMode::API_SCHEMA.parse_property_string(str).ok()) - .and_then(|value| MaintenanceMode::deserialize(value).ok()) + self.maintenance_mode.as_ref().and_then(|str| { + MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new( + str, + &MaintenanceMode::API_SCHEMA, + )) + .ok() + }) } } From 15c013f758e4c3c565ba9e62e5aac5728172eb25 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 22 Apr 2024 10:31:29 +0200 Subject: [PATCH 245/299] maintenance: derive Copy for maintenance type and make maintenance mode fields public Because it is a public api type. Signed-off-by: Dietmar Maurer --- pbs-api-types/src/maintenance.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index a605cc17..1e3413dc 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -33,7 +33,7 @@ pub enum Operation { } #[api] -#[derive(Deserialize, Serialize, PartialEq, Eq)] +#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)] #[serde(rename_all = "kebab-case")] /// Maintenance type. pub enum MaintenanceType { @@ -69,11 +69,11 @@ serde_plain::derive_fromstr_from_deserialize!(MaintenanceType); pub struct MaintenanceMode { /// Type of maintenance ("read-only" or "offline"). #[serde(rename = "type")] - ty: MaintenanceType, + pub ty: MaintenanceType, /// Reason for maintenance. #[serde(skip_serializing_if = "Option::is_none")] - message: Option, + pub message: Option, } impl MaintenanceMode { From bec18b8e60b0ead134b0789add2a9bad34164213 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 22 Apr 2024 10:31:30 +0200 Subject: [PATCH 246/299] api: assert that maintenance mode transitions are valid Maintenance mode Delete locks the datastore. It must not be possible to go back to normal modes, because the datastore may be in undefined state. 
Signed-off-by: Dietmar Maurer --- pbs-api-types/src/datastore.rs | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 699b9e15..8a8ec12d 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -11,7 +11,7 @@ use proxmox_schema::{ }; use crate::{ - Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, + Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, MaintenanceType, Userid, BACKUP_ID_RE, BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, @@ -344,6 +344,37 @@ impl DataStoreConfig { .ok() }) } + + pub fn set_maintenance_mode(&mut self, new_mode: Option) -> Result<(), Error> { + let current_type = self.get_maintenance_mode().map(|mode| mode.ty); + let new_type = new_mode.as_ref().map(|mode| mode.ty); + + match current_type { + Some(MaintenanceType::ReadOnly) => { /* always OK */ } + Some(MaintenanceType::Offline) => { /* always OK */ } + Some(MaintenanceType::Delete) => { + match new_type { + Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ } + _ => { + bail!("datastore is being deleted") + } + } + } + None => { /* always OK */ } + } + + let new_mode = match new_mode { + Some(new_mode) => Some( + proxmox_schema::property_string::PropertyString::new(new_mode) + .to_property_string()?, + ), + None => None, + }; + + self.maintenance_mode = new_mode; + + Ok(()) + } } #[api( From 163732177d450c2902a480dc88161b322b696606 Mon Sep 17 00:00:00 2001 From: Stefan Lendl Date: Thu, 18 Apr 2024 12:16:57 +0200 Subject: [PATCH 247/299] api: garbage collect job status Adds an api endpoint on the datastore that reports the gc job status such as: - Schedule - State (of last run) - Duration 
(of last run) - Last Run - Next Run (if scheduled) - Pending Chunks (of last run) - Pending Bytes (of last run) - Removed Chunks (of last run) - Removed Bytes (of last run) Adds a dedicated endpoint admin/gc that reports gc job status for all datastores including the onces without a gc-schedule. Signed-off-by: Stefan Lendl Originally-by: Gabriel Goller Tested-by: Gabriel Goller Reviewd-by: Gabriel Goller Tested-by: Lukas Wagner Reviewed-by: Lukas Wagner --- pbs-api-types/src/datastore.rs | 46 ++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 8a8ec12d..c6641655 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1307,6 +1307,52 @@ pub struct GarbageCollectionStatus { pub still_bad: usize, } +#[api( + properties: { + "last-run-upid": { + optional: true, + type: UPID, + }, + }, +)] +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "kebab-case")] +/// Garbage Collection general info +pub struct GarbageCollectionJobStatus { + /// Datastore + pub store: String, + /// upid of the last run gc job + #[serde(skip_serializing_if = "Option::is_none")] + pub last_run_upid: Option, + /// Sum of removed bytes. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub removed_bytes: Option, + /// Number of removed chunks + #[serde(skip_serializing_if = "Option::is_none")] + pub removed_chunks: Option, + /// Sum of pending bytes + #[serde(skip_serializing_if = "Option::is_none")] + pub pending_bytes: Option, + /// Number of pending chunks + #[serde(skip_serializing_if = "Option::is_none")] + pub pending_chunks: Option, + /// Schedule of the gc job + #[serde(skip_serializing_if = "Option::is_none")] + pub schedule: Option, + /// Time of the next gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub next_run: Option, + /// Endtime of the last gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub last_run_endtime: Option, + /// State of the last gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub last_run_state: Option, + /// Duration of last gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub duration: Option, +} + #[api( properties: { "gc-status": { From 730f4e58ff6e75a7075c184093398ecc715001ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Mon, 22 Apr 2024 11:02:57 +0200 Subject: [PATCH 248/299] GC: flatten existing status into job status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit to avoid drifting definitions and reduce duplication. with the next major release, the 'upid' field could then be renamed and aliased to be in line with the other jobs, which all use 'last-run-upid'. doing it now would break existing callers of the GC status endpoint (or consumers of the on-disk status file). the main difference is that the GC status fields are now not optional (except for the UPID) in the job status, since flattening an optional value is not possible. this only affects datastores that were never GCed at all, and only direct API consumers, since the UI handles those fields correctly. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index c6641655..45dd41ae 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1280,7 +1280,7 @@ pub struct TypeCounts { }, }, )] -#[derive(Clone, Default, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "kebab-case")] /// Garbage collection status. pub struct GarbageCollectionStatus { @@ -1309,11 +1309,10 @@ pub struct GarbageCollectionStatus { #[api( properties: { - "last-run-upid": { - optional: true, - type: UPID, + "status": { + type: GarbageCollectionStatus, }, - }, + } )] #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "kebab-case")] @@ -1321,21 +1320,8 @@ pub struct GarbageCollectionStatus { pub struct GarbageCollectionJobStatus { /// Datastore pub store: String, - /// upid of the last run gc job - #[serde(skip_serializing_if = "Option::is_none")] - pub last_run_upid: Option, - /// Sum of removed bytes. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub removed_bytes: Option, - /// Number of removed chunks - #[serde(skip_serializing_if = "Option::is_none")] - pub removed_chunks: Option, - /// Sum of pending bytes - #[serde(skip_serializing_if = "Option::is_none")] - pub pending_bytes: Option, - /// Number of pending chunks - #[serde(skip_serializing_if = "Option::is_none")] - pub pending_chunks: Option, + #[serde(flatten)] + pub status: GarbageCollectionStatus, /// Schedule of the gc job #[serde(skip_serializing_if = "Option::is_none")] pub schedule: Option, From 90603f6e250266dee19ecd804e92259883d1266c Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Tue, 23 Apr 2024 13:52:04 +0200 Subject: [PATCH 249/299] api-types: api: datatore: add notification-mode parameter This one lets the user choose between the old notification behavior (selecting an email address/user and always/error/never behavior per datastore) and the new one (emit notification events to the notification system) Signed-off-by: Lukas Wagner Tested-by: Gabriel Goller Reviewed-by: Gabriel Goller Tested-by: Maximiliano Sandoval Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/datastore.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 45dd41ae..31767417 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -309,6 +309,10 @@ pub struct DataStoreConfig { #[serde(skip_serializing_if = "Option::is_none")] pub notify: Option, + /// Opt in to the new notification system + #[serde(skip_serializing_if = "Option::is_none")] + pub notification_mode: Option, + /// Datastore tuning options #[serde(skip_serializing_if = "Option::is_none")] pub tuning: Option, @@ -318,6 +322,23 @@ pub struct DataStoreConfig { pub maintenance_mode: Option, } +#[api] +#[derive(Serialize, Deserialize, Updater, Clone, PartialEq, Default)] +#[serde(rename_all = "kebab-case")] +/// Configure how 
notifications for this datastore should be sent. +/// `legacy-sendmail` sends email notifications to the user configured +/// in `notify-user` via the system's `sendmail` executable. +/// `notification-system` emits matchable notification events to the +/// notification system. +pub enum NotificationMode { + /// Send notifications via the system's sendmail command to the user + /// configured in `notify-user` + #[default] + LegacySendmail, + /// Emit notification events to the notification system + NotificationSystem, +} + impl DataStoreConfig { pub fn new(name: String, path: String) -> Self { Self { @@ -330,6 +351,7 @@ impl DataStoreConfig { verify_new: None, notify_user: None, notify: None, + notification_mode: None, tuning: None, maintenance_mode: None, } From 13726178769d1ff2661cccb444396c61208fd8ce Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Tue, 23 Apr 2024 13:52:05 +0200 Subject: [PATCH 250/299] api-types: api: tape: add notification-mode parameter Same as with datastores, this option determines whether we send notifications the old way (send email via sendmail to a user's email address) or the new way (emit matchable notification events to the notification stack). 
Signed-off-by: Lukas Wagner Tested-by: Gabriel Goller Reviewed-by: Gabriel Goller Tested-by: Maximiliano Sandoval Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/jobs.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 6fb9b187..868702bc 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -8,9 +8,9 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, - BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, - MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, + Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid, + BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA, + DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; @@ -325,6 +325,8 @@ pub struct TapeBackupJobSetup { #[serde(skip_serializing_if = "Option::is_none")] pub notify_user: Option, #[serde(skip_serializing_if = "Option::is_none")] + pub notification_mode: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub group_filter: Option>, #[serde(skip_serializing_if = "Option::is_none", default)] pub ns: Option, From 8bdf9ac45c5557bd939f8003c25d25fb488ebcda Mon Sep 17 00:00:00 2001 From: Christoph Heiss Date: Fri, 12 Jan 2024 17:16:02 +0100 Subject: [PATCH 251/299] api: access: add routes for managing AD realms Signed-off-by: Christoph Heiss Reviewed-by: Lukas Wagner Tested-by: Lukas Wagner --- pbs-api-types/src/ad.rs | 98 ++++++++++++++++++++++++++++++++++++++++ pbs-api-types/src/lib.rs | 3 ++ 2 files changed, 101 insertions(+) create mode 100644 pbs-api-types/src/ad.rs diff --git a/pbs-api-types/src/ad.rs b/pbs-api-types/src/ad.rs new file mode 100644 index 
00000000..910571a0 --- /dev/null +++ b/pbs-api-types/src/ad.rs @@ -0,0 +1,98 @@ +use serde::{Deserialize, Serialize}; + +use proxmox_schema::{api, Updater}; + +use super::{ + LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, + SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA, +}; + +#[api( + properties: { + "realm": { + schema: REALM_ID_SCHEMA, + }, + "comment": { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + "verify": { + optional: true, + default: false, + }, + "sync-defaults-options": { + schema: SYNC_DEFAULTS_STRING_SCHEMA, + optional: true, + }, + "sync-attributes": { + schema: SYNC_ATTRIBUTES_SCHEMA, + optional: true, + }, + "user-classes" : { + optional: true, + schema: USER_CLASSES_SCHEMA, + }, + "base-dn" : { + schema: LDAP_DOMAIN_SCHEMA, + optional: true, + }, + "bind-dn" : { + schema: LDAP_DOMAIN_SCHEMA, + optional: true, + } + }, +)] +#[derive(Serialize, Deserialize, Updater, Clone)] +#[serde(rename_all = "kebab-case")] +/// AD realm configuration properties. +pub struct AdRealmConfig { + #[updater(skip)] + pub realm: String, + /// AD server address + pub server1: String, + /// Fallback AD server address + #[serde(skip_serializing_if = "Option::is_none")] + pub server2: Option, + /// AD server Port + #[serde(skip_serializing_if = "Option::is_none")] + pub port: Option, + /// Base domain name. Users are searched under this domain using a `subtree search`. + /// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be + /// overridden if the need arises. + #[serde(skip_serializing_if = "Option::is_none")] + pub base_dn: Option, + /// Comment + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, + /// Connection security + #[serde(skip_serializing_if = "Option::is_none")] + pub mode: Option, + /// Verify server certificate + #[serde(skip_serializing_if = "Option::is_none")] + pub verify: Option, + /// CA certificate to use for the server. 
The path can point to + /// either a file, or a directory. If it points to a file, + /// the PEM-formatted X.509 certificate stored at the path + /// will be added as a trusted certificate. + /// If the path points to a directory, + /// the directory replaces the system's default certificate + /// store at `/etc/ssl/certs` - Every file in the directory + /// will be loaded as a trusted certificate. + #[serde(skip_serializing_if = "Option::is_none")] + pub capath: Option, + /// Bind domain to use for looking up users + #[serde(skip_serializing_if = "Option::is_none")] + pub bind_dn: Option, + /// Custom LDAP search filter for user sync + #[serde(skip_serializing_if = "Option::is_none")] + pub filter: Option, + /// Default options for AD sync + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_defaults_options: Option, + /// List of LDAP attributes to sync from AD to user config + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_attributes: Option, + /// User ``objectClass`` classes to sync + #[serde(skip_serializing_if = "Option::is_none")] + pub user_classes: Option, +} diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 7eb836ed..72cddf38 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -130,6 +130,9 @@ pub use openid::*; mod ldap; pub use ldap::*; +mod ad; +pub use ad::*; + mod remote; pub use remote::*; From 7db5cd8c485c7f09248c7e79569277ca12bd6b6e Mon Sep 17 00:00:00 2001 From: Christoph Heiss Date: Fri, 12 Jan 2024 17:16:04 +0100 Subject: [PATCH 252/299] realm sync: add sync job for AD realms Basically just a thin wrapper over the existing LDAP-based realm sync job, which retrieves the appropriate config and sets the correct user attributes. 
Signed-off-by: Christoph Heiss Reviewed-by: Lukas Wagner Tested-by: Lukas Wagner --- pbs-api-types/src/lib.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 72cddf38..a3ad185b 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -381,8 +381,13 @@ pub enum RealmType { OpenId, /// An LDAP realm Ldap, + /// An Active Directory (AD) realm + Ad, } +serde_plain::derive_display_from_serialize!(RealmType); +serde_plain::derive_fromstr_from_deserialize!(RealmType); + #[api( properties: { realm: { From f2633b462f1c95a55a6c3dfb1be1bfba478523c5 Mon Sep 17 00:00:00 2001 From: Stefan Lendl Date: Thu, 4 Apr 2024 12:00:31 +0200 Subject: [PATCH 253/299] config: write vlan network interface * Add vlan_id and vlan_raw_device fields to the Interface api type * Write to the network config the vlan specific properties for vlan interface type * Add several tests to verify the functionally Signed-off-by: Stefan Lendl Tested-by: Lukas Wagner Reviewed-by: Lukas Wagner Tested-by: Folke Gleumes Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/network.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index e3a5e481..fe083dc6 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -224,6 +224,15 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = schema: NETWORK_INTERFACE_ARRAY_SCHEMA, optional: true, }, + "vlan-id": { + description: "VLAN ID.", + type: u16, + optional: true, + }, + "vlan-raw-device": { + schema: NETWORK_INTERFACE_NAME_SCHEMA, + optional: true, + }, bond_mode: { type: LinuxBondMode, optional: true, @@ -287,6 +296,12 @@ pub struct Interface { /// Enable bridge vlan support. 
#[serde(skip_serializing_if = "Option::is_none")] pub bridge_vlan_aware: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "vlan-id")] + pub vlan_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "vlan-raw-device")] + pub vlan_raw_device: Option, #[serde(skip_serializing_if = "Option::is_none")] pub slaves: Option>, @@ -319,6 +334,8 @@ impl Interface { mtu: None, bridge_ports: None, bridge_vlan_aware: None, + vlan_id: None, + vlan_raw_device: None, slaves: None, bond_mode: None, bond_primary: None, From 16ac3ef458c7fee2f9caaef369e8f69efce35d99 Mon Sep 17 00:00:00 2001 From: Gabriel Goller Date: Fri, 26 Apr 2024 16:02:43 +0200 Subject: [PATCH 254/299] api-types: remove influxdb bucket name restrictions Remove the regex for influxdb organizations and buckets. Influxdb does not place any constraints on these names and allows all characters. This allows influxdb organization names with slashes. Also remove a duplicate comment and add some missing ones. This also aligns the behavior to PVE as there are no restrictions there either. 
The motivation for this patch is this forum post: https://forum.proxmox.com/threads/influx-db-organization-doesnt-allow-slash.145402/ Signed-off-by: Gabriel Goller --- pbs-api-types/src/metrics.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs index 6800c23b..23421035 100644 --- a/pbs-api-types/src/metrics.rs +++ b/pbs-api-types/src/metrics.rs @@ -12,14 +12,12 @@ pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID .schema(); pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.") - .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) .max_length(32) .default("proxmox") .schema(); pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.") - .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) .max_length(32) .default("proxmox") @@ -129,13 +127,14 @@ pub struct InfluxDbHttp { pub enable: bool, /// The base url of the influxdb server pub url: String, - /// The Optional Token #[serde(skip_serializing_if = "Option::is_none")] /// The (optional) API token pub token: Option, #[serde(skip_serializing_if = "Option::is_none")] + /// Named location where time series data is stored pub bucket: Option, #[serde(skip_serializing_if = "Option::is_none")] + /// Workspace for a group of users pub organization: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The (optional) maximum body size From 8002011f7c8c787a46ab9575d55ef3978e6c9a45 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Mon, 13 May 2024 12:46:09 +0200 Subject: [PATCH 255/299] tape: save 'bytes used' in tape inventory and show them on the ui. This can help uses with seeing how much a tape is used. The value is updated on 'commit' and when the tape is changed during a backup. For drives not supporting the volume statistics, this is simply skipped. 
Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/media.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/tape/media.rs b/pbs-api-types/src/tape/media.rs index 6792cd3c..6227f463 100644 --- a/pbs-api-types/src/tape/media.rs +++ b/pbs-api-types/src/tape/media.rs @@ -81,6 +81,9 @@ pub struct MediaListEntry { /// Media Pool #[serde(skip_serializing_if = "Option::is_none")] pub pool: Option, + #[serde(skip_serializing_if = "Option::is_none")] + /// Bytes currently used + pub bytes_used: Option, } #[api( From 175a9b3cd5a19177a16a291c2d631c8349a4cd67 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Mon, 13 May 2024 12:49:23 +0200 Subject: [PATCH 256/299] tape: add functions to parse drive device activity we use the VHF part from the DT Device Activity page for that. This is intended to query the drive for it's current state and activity. Currently only the activity is parsed and used. Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/drive.rs | 65 +++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index 626c5d9c..de569980 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -276,3 +276,68 @@ pub struct Lp17VolumeStatistics { /// Volume serial number pub serial: String, } + +/// The DT Device Activity from DT Device Status LP page +#[api] +#[derive(Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub enum DeviceActivity { + /// No activity + NoActivity, + /// Cleaning + Cleaning, + /// Loading + Loading, + /// Unloading + Unloading, + /// Other unspecified activity + Other, + /// Reading + Reading, + /// Writing + Writing, + /// Locating + Locating, + /// Rewinding + Rewinding, + /// Erasing + Erasing, + /// Formatting + Formatting, + /// Calibrating + Calibrating, + /// Other (DT) + OtherDT, + /// Updating microcode + MicrocodeUpdate, + /// Reading encrypted data + 
ReadingEncrypted, + /// Writing encrypted data + WritingEncrypted, +} + +impl TryFrom for DeviceActivity { + type Error = Error; + + fn try_from(value: u8) -> Result { + Ok(match value { + 0x00 => DeviceActivity::NoActivity, + 0x01 => DeviceActivity::Cleaning, + 0x02 => DeviceActivity::Loading, + 0x03 => DeviceActivity::Unloading, + 0x04 => DeviceActivity::Other, + 0x05 => DeviceActivity::Reading, + 0x06 => DeviceActivity::Writing, + 0x07 => DeviceActivity::Locating, + 0x08 => DeviceActivity::Rewinding, + 0x09 => DeviceActivity::Erasing, + 0x0A => DeviceActivity::Formatting, + 0x0B => DeviceActivity::Calibrating, + 0x0C => DeviceActivity::OtherDT, + 0x0D => DeviceActivity::MicrocodeUpdate, + 0x0E => DeviceActivity::ReadingEncrypted, + 0x0F => DeviceActivity::WritingEncrypted, + other => bail!("invalid DT device activity value: {:x}", other), + }) + } +} From 2bf32cb82015e4ba1ca7a47dce7e0b177323c01b Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Mon, 13 May 2024 12:49:24 +0200 Subject: [PATCH 257/299] tape: add drive activity to drive status api and show it in the gui for single drives. Adds the known values for the activity to the UI. Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/drive.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index de569980..caa6b3b3 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -216,6 +216,9 @@ pub struct LtoDriveAndMediaStatus { /// Estimated tape wearout factor (assuming max. 
16000 end-to-end passes) #[serde(skip_serializing_if = "Option::is_none")] pub medium_wearout: Option, + /// Current device activity + #[serde(skip_serializing_if = "Option::is_none")] + pub drive_activity: Option, } #[api()] From e226ddcc90e26b8fe5fc928cb1f8cc4ee2dcdf55 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Mon, 13 May 2024 12:49:26 +0200 Subject: [PATCH 258/299] tape: include drive activity in status Since we don't query each drives status seperately, but rely on a single call to the drives listing parameter for that, we now add the option to query the activity there too. This makes that data avaiable for us to show in a seperate (by default hidden) column. Also we show the activity in the 'State' column when the drive is idle from our perspective. This is useful when e.g. an LTO-9 tape is loaded the first time and is calibrating, since that happens automatically. Signed-off-by: Dominik Csapak --- pbs-api-types/src/tape/drive.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index caa6b3b3..2b788bd6 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -93,6 +93,9 @@ pub struct DriveListEntry { /// the state of the drive if locked #[serde(skip_serializing_if = "Option::is_none")] pub state: Option, + /// Current device activity + #[serde(skip_serializing_if = "Option::is_none")] + pub activity: Option, } #[api()] From 4a69e1cf6488f2476657cee87025b8ea2b94695b Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 21 Jun 2024 11:51:30 +0200 Subject: [PATCH 259/299] use new apt/apt-api-types crate --- pbs-api-types/Cargo.toml | 1 + pbs-api-types/src/lib.rs | 35 +++++++---------------------------- 2 files changed, 8 insertions(+), 28 deletions(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 94ab583b..808ff514 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -16,6 +16,7 @@ serde.workspace = true 
serde_plain.workspace = true proxmox-auth-api = { workspace = true, features = [ "api-types" ] } +proxmox-apt-api-types.workspace = true proxmox-human-byte.workspace = true proxmox-lang.workspace=true proxmox-schema = { workspace = true, features = [ "api-macro" ] } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index a3ad185b..40bcd8f1 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -52,6 +52,13 @@ pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA}; use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR}; +// re-export APT API types +pub use proxmox_apt_api_types::{ + APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryFile, + APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, APTStandardRepository, + APTUpdateInfo, APTUpdateOptions, +}; + #[rustfmt::skip] pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*"; @@ -249,34 +256,6 @@ pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") .max_length(64) .schema(); -#[api()] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "PascalCase")] -/// Describes a package for which an update is available. 
-pub struct APTUpdateInfo { - /// Package name - pub package: String, - /// Package title - pub title: String, - /// Package architecture - pub arch: String, - /// Human readable package description - pub description: String, - /// New version to be updated to - pub version: String, - /// Old version currently installed - pub old_version: String, - /// Package origin - pub origin: String, - /// Package priority in human-readable form - pub priority: String, - /// Package section - pub section: String, - /// Custom extra field for additional package information - #[serde(skip_serializing_if = "Option::is_none")] - pub extra_info: Option, -} - #[api()] #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] From 0106a14d432f3ad772ba1da6def2e62dff392b75 Mon Sep 17 00:00:00 2001 From: Maximiliano Sandoval Date: Wed, 7 Aug 2024 14:10:42 +0200 Subject: [PATCH 260/299] fix typos in rust documentation blocks Signed-off-by: Maximiliano Sandoval --- pbs-api-types/src/acl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index ef639862..a8ae57a9 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -223,7 +223,7 @@ pub enum Role { RemoteAudit = ROLE_REMOTE_AUDIT, /// Remote Administrator RemoteAdmin = ROLE_REMOTE_ADMIN, - /// Syncronisation Opertator + /// Synchronization Operator RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR, /// Tape Auditor TapeAudit = ROLE_TAPE_AUDIT, From 35e466410c11ddb3de91c7a6f28c3672493302f1 Mon Sep 17 00:00:00 2001 From: Maximiliano Sandoval Date: Wed, 7 Aug 2024 14:10:44 +0200 Subject: [PATCH 261/299] fix typos in strings Signed-off-by: Maximiliano Sandoval --- pbs-api-types/src/ldap.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs index f3df90a0..a3e0407b 100644 --- a/pbs-api-types/src/ldap.rs +++ b/pbs-api-types/src/ldap.rs @@ -149,7 
+149,7 @@ pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults .schema(); const REMOVE_VANISHED_DESCRIPTION: &str = - "A semicolon-seperated list of things to remove when they or the user \ + "A semicolon-separated list of things to remove when they or the user \ vanishes during user synchronization. The following values are possible: ``entry`` removes the \ user when not returned from the sync; ``properties`` removes any \ properties on existing user that do not appear in the source. \ From 3e83acfd896dcc95caa69fa076a96531954a952b Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Tue, 6 Aug 2024 14:59:56 +0200 Subject: [PATCH 262/299] api-types: rrd: use api-types from proxmox-rrd Signed-off-by: Lukas Wagner --- pbs-api-types/src/lib.rs | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 40bcd8f1..635292a5 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -317,36 +317,6 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(), }; -#[api()] -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "UPPERCASE")] -/// RRD consolidation mode -pub enum RRDMode { - /// Maximum - Max, - /// Average - Average, -} - -#[api()] -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// RRD time frame -pub enum RRDTimeFrame { - /// Hour - Hour, - /// Day - Day, - /// Week - Week, - /// Month - Month, - /// Year - Year, - /// Decade (10 years) - Decade, -} - #[api] #[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)] #[serde(rename_all = "lowercase")] From 959b7745f756e4654f6b2d54a6c188f779046511 Mon Sep 17 00:00:00 2001 From: Maximiliano Sandoval Date: Tue, 13 Aug 2024 10:44:06 +0200 Subject: [PATCH 263/299] api-types: remove unused lazy_static dependency Signed-off-by: Maximiliano Sandoval --- 
pbs-api-types/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 808ff514..17c946fe 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -9,7 +9,6 @@ description = "general API type helpers for PBS" anyhow.workspace = true const_format.workspace = true hex.workspace = true -lazy_static.workspace = true percent-encoding.workspace = true regex.workspace = true serde.workspace = true From 1d5d7b7f9804ef61fb9bb598c725eca16c16ae59 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Fri, 9 Aug 2024 10:20:32 +0200 Subject: [PATCH 264/299] fix #5622: backup client: properly handle rate/burst parameters The rate and burst parameters are integers, so the mapping from value with `.as_str()` will always return `None` effectively never applying any rate limit at all. Fix it by turning them into a HumanByte instead of an integer. To not crowd the parameter section so much, create a ClientRateLimitConfig struct that gets flattened into the parameter list of the backup client. To adapt the description of the parameters, add new schemas that copy the `HumanByte` schema but change the description. With this, the rate limit actually works, and there is no lower limit any more. The old TRAFFIC_CONTROL_RATE/BURST_SCHEMAs can be deleted since the client was the only user of them. 
Signed-off-by: Dominik Csapak --- pbs-api-types/src/traffic_control.rs | 51 ++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 11 deletions(-) diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index fb264531..0da327f2 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use proxmox_human_byte::HumanByte; -use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater}; +use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater}; use crate::{ CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, @@ -18,16 +18,6 @@ pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.") .max_length(32) .schema(); -pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = - IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.") - .minimum(100_000) - .schema(); - -pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = - IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.") - .minimum(1000) - .schema(); - #[api( properties: { "rate-in": { @@ -71,6 +61,45 @@ impl RateLimitConfig { burst_out: burst, } } + + /// Create a [RateLimitConfig] from a [ClientRateLimitConfig] + pub fn from_client_config(limit: ClientRateLimitConfig) -> Self { + Self::with_same_inout(limit.rate, limit.burst) + } +} + +const CLIENT_RATE_LIMIT_SCHEMA: Schema = StringSchema { + description: "Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", + ..*HumanByte::API_SCHEMA.unwrap_string_schema() +} +.schema(); + +const CLIENT_BURST_SCHEMA: Schema = StringSchema { + description: "Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", + ..*HumanByte::API_SCHEMA.unwrap_string_schema() +} +.schema(); + +#[api( + 
properties: { + rate: { + schema: CLIENT_RATE_LIMIT_SCHEMA, + optional: true, + }, + burst: { + schema: CLIENT_BURST_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize, Default, Clone)] +#[serde(rename_all = "kebab-case")] +/// Client Rate Limit Configuration +pub struct ClientRateLimitConfig { + #[serde(skip_serializing_if = "Option::is_none")] + rate: Option, + #[serde(skip_serializing_if = "Option::is_none")] + burst: Option, } #[api( From deb07eeb319f3000a204d6b7a4a6b11588814370 Mon Sep 17 00:00:00 2001 From: Lukas Wagner Date: Tue, 15 Oct 2024 10:46:33 +0200 Subject: [PATCH 265/299] pbs-api-types: add types for the new metrics endpoint Signed-off-by: Lukas Wagner --- pbs-api-types/src/metrics.rs | 66 ++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs index 23421035..26266529 100644 --- a/pbs-api-types/src/metrics.rs +++ b/pbs-api-types/src/metrics.rs @@ -187,3 +187,69 @@ pub struct MetricServerInfo { #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, } + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[api( + properties: { + data: { + type: Array, + items: { + type: MetricDataPoint, + } + } + } +)] +/// Return type for the metric API endpoint +pub struct Metrics { + /// List of metric data points, sorted by timestamp + pub data: Vec, +} + +#[api( + properties: { + id: { + type: String, + }, + metric: { + type: String, + }, + timestamp: { + type: Integer, + }, + }, +)] +/// Metric data point +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct MetricDataPoint { + /// Unique identifier for this metric object, for instance 'node/' + /// or 'qemu/'. + pub id: String, + + /// Name of the metric. + pub metric: String, + + /// Time at which this metric was observed + pub timestamp: i64, + + #[serde(rename = "type")] + pub ty: MetricDataType, + + /// Metric value. + pub value: f64, +} + +#[api] +/// Type of the metric. 
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum MetricDataType { + /// gauge. + Gauge, + /// counter. + Counter, + /// derive. + Derive, +} + +serde_plain::derive_display_from_serialize!(MetricDataType); +serde_plain::derive_fromstr_from_deserialize!(MetricDataType); From cba72020ddda6d73ba8f196a560ad76f56624111 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Tue, 22 Oct 2024 15:25:52 +0200 Subject: [PATCH 266/299] api-types: add missing doc-comment description for api enums this is used as description in the api schema Signed-off-by: Thomas Lamprecht --- pbs-api-types/src/lib.rs | 1 + pbs-api-types/src/tape/drive.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 635292a5..460c7da7 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -270,6 +270,7 @@ pub enum NodePowerCommand { #[api()] #[derive(Eq, PartialEq, Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] +/// The state (result) of a finished worker task. pub enum TaskStateType { /// Ok OK, diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index 2b788bd6..e00665cd 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -112,6 +112,7 @@ pub struct MamAttribute { #[api()] #[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)] +/// The density of a tape medium, derived from the LTO version. pub enum TapeDensity { /// Unknown (no media loaded) Unknown, From 7bffb9fe92a55705d08eb6874a586e7b50ec40c9 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 13 Nov 2024 16:00:38 +0100 Subject: [PATCH 267/299] config: factor out method to get the absolute datastore path removable datastores will have a PBS-managed mountpoint as path, direct access to the field needs to be replaced with a helper that can account for this. 
Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 31767417..a5704c93 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -357,6 +357,11 @@ impl DataStoreConfig { } } + /// Returns the absolute path to the datastore content. + pub fn absolute_path(&self) -> String { + self.path.clone() + } + pub fn get_maintenance_mode(&self) -> Option { self.maintenance_mode.as_ref().and_then(|str| { MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new( From 48809ab0db15ba0940bc050391bbf15047f3ce01 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 11 Nov 2024 16:43:30 +0100 Subject: [PATCH 268/299] api types: add remote acl path method for `BackupNamespace` Add a `remote_acl_path` helper method for creating acl paths for remote namespaces, to be used by the priv checks on remote datastore namespaces for e.g. the sync job in push direction. Factor out the common path extension into a dedicated method. 
Signed-off-by: Christian Ebner --- pbs-api-types/src/datastore.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index a5704c93..c9aa6b74 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -722,9 +722,7 @@ impl BackupNamespace { Ok(()) } - pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { - let mut path: Vec<&str> = vec!["datastore", store]; - + fn acl_path_extend<'a>(&'a self, mut path: Vec<&'a str>) -> Vec<&'a str> { if self.is_root() { path } else { @@ -733,6 +731,14 @@ impl BackupNamespace { } } + pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { + self.acl_path_extend(vec!["datastore", store]) + } + + pub fn remote_acl_path<'a>(&'a self, remote: &'a str, store: &'a str) -> Vec<&'a str> { + self.acl_path_extend(vec!["remote", remote, store]) + } + /// Check whether this namespace contains another namespace. /// /// If so, the depth is returned. From 8614be4ceb67c5493fbbe1c63aa52a361d8353ab Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 11 Nov 2024 16:43:31 +0100 Subject: [PATCH 269/299] api types: implement remote acl path method for sync job Add `remote_acl_path` method which generates the acl path from the sync job configuration. This helper allows to easily generate the acl path from a given sync job config for privilege checks. 
Signed-off-by: Christian Ebner --- pbs-api-types/src/jobs.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 868702bc..bf7a6bd5 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -594,6 +594,14 @@ impl SyncJobConfig { None => vec!["datastore", &self.store], } } + + pub fn remote_acl_path(&self) -> Option> { + let remote = self.remote.as_ref()?; + match &self.remote_ns { + Some(remote_ns) => Some(remote_ns.remote_acl_path(remote, &self.remote_store)), + None => Some(vec!["remote", remote, &self.remote_store]), + } + } } #[api( From 92b652935b0dfb6a059c82c32e7e2deaf2a38abd Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 11 Nov 2024 16:43:32 +0100 Subject: [PATCH 270/299] api types: define remote permissions and roles for push sync Adding the privileges to allow backup, namespace creation and prune on remote targets, to be used for sync jobs in push direction. Also adds dedicated roles setting the required privileges. Signed-off-by: Christian Ebner --- pbs-api-types/src/acl.rs | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index a8ae57a9..e2f97f06 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -58,6 +58,12 @@ constnamedbitmap! 
{ PRIV_REMOTE_MODIFY("Remote.Modify"); /// Remote.Read allows reading data from a configured `Remote` PRIV_REMOTE_READ("Remote.Read"); + /// Remote.DatastoreBackup allows creating new snapshots on remote datastores + PRIV_REMOTE_DATASTORE_BACKUP("Remote.DatastoreBackup"); + /// Remote.DatastoreModify allows to modify remote datastores + PRIV_REMOTE_DATASTORE_MODIFY("Remote.DatastoreModify"); + /// Remote.DatastorePrune allows deleting snapshots on remote datastores + PRIV_REMOTE_DATASTORE_PRUNE("Remote.DatastorePrune"); /// Sys.Console allows access to the system's console PRIV_SYS_CONSOLE("Sys.Console"); @@ -160,6 +166,32 @@ pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0 | PRIV_REMOTE_AUDIT | PRIV_REMOTE_READ; +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.SyncPushOperator can read and push snapshots to the remote. +pub const ROLE_REMOTE_SYNC_PUSH_OPERATOR: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_DATASTORE_BACKUP; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.DatastorePowerUser can read and push snapshots to the remote, and prune owned snapshots +/// and groups but not create or remove namespaces. +pub const ROLE_REMOTE_DATASTORE_POWERUSER: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_DATASTORE_BACKUP + | PRIV_REMOTE_DATASTORE_PRUNE; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.DatastoreAdmin can read and push snapshots to the remote, prune owned snapshots +/// and groups, as well as create or remove namespaces. 
+pub const ROLE_REMOTE_DATASTORE_ADMIN: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_DATASTORE_BACKUP + | PRIV_REMOTE_DATASTORE_MODIFY + | PRIV_REMOTE_DATASTORE_PRUNE; + #[rustfmt::skip] #[allow(clippy::identity_op)] /// Tape.Audit can audit the tape backup configuration and media content @@ -225,6 +257,12 @@ pub enum Role { RemoteAdmin = ROLE_REMOTE_ADMIN, /// Synchronization Operator RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR, + /// Synchronisation Operator (push direction) + RemoteSyncPushOperator = ROLE_REMOTE_SYNC_PUSH_OPERATOR, + /// Remote Datastore Prune + RemoteDatastorePowerUser = ROLE_REMOTE_DATASTORE_POWERUSER, + /// Remote Datastore Admin + RemoteDatastoreAdmin = ROLE_REMOTE_DATASTORE_ADMIN, /// Tape Auditor TapeAudit = ROLE_TAPE_AUDIT, /// Tape Administrator From 62270f8fef7b830d30188c804fb458b8fc4643f3 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 11 Nov 2024 16:43:33 +0100 Subject: [PATCH 271/299] datastore: move `BackupGroupDeleteStats` to api types In preparation for the delete stats to be exposed as return type to the backup group delete api endpoint. Also, rename the private field `unremoved_protected` to a better fitting `protected_snapshots` to be in line with the method names. 
Signed-off-by: Christian Ebner --- pbs-api-types/src/datastore.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index c9aa6b74..a4a48d2c 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1580,3 +1580,33 @@ pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { format!("datastore '{}', namespace '{}'", store, ns) } } + +#[derive(Default)] +pub struct BackupGroupDeleteStats { + // Count of protected snapshots, therefore not removed + protected_snapshots: usize, + // Count of deleted snapshots + removed_snapshots: usize, +} + +impl BackupGroupDeleteStats { + pub fn all_removed(&self) -> bool { + self.protected_snapshots == 0 + } + + pub fn removed_snapshots(&self) -> usize { + self.removed_snapshots + } + + pub fn protected_snapshots(&self) -> usize { + self.protected_snapshots + } + + pub fn increment_removed_snapshots(&mut self) { + self.removed_snapshots += 1; + } + + pub fn increment_protected_snapshots(&mut self) { + self.protected_snapshots += 1; + } +} From 9aaad591c67588b702874a66f877f6a077358457 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 11 Nov 2024 16:43:34 +0100 Subject: [PATCH 272/299] api types: implement api type for `BackupGroupDeleteStats` Make the `BackupGroupDeleteStats` exposable via the API by implementing the ApiTypes trait via the api macro invocation and add an additional field to account for the number of deleted groups. Further, add a method to add up the statistics. 
Signed-off-by: Christian Ebner --- pbs-api-types/src/datastore.rs | 36 +++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index a4a48d2c..3d2b0eab 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1581,8 +1581,28 @@ pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { } } -#[derive(Default)] +pub const DELETE_STATS_COUNT_SCHEMA: Schema = + IntegerSchema::new("Number of entities").minimum(0).schema(); + +#[api( + properties: { + "removed-groups": { + schema: DELETE_STATS_COUNT_SCHEMA, + }, + "protected-snapshots": { + schema: DELETE_STATS_COUNT_SCHEMA, + }, + "removed-snapshots": { + schema: DELETE_STATS_COUNT_SCHEMA, + }, + }, +)] +#[derive(Default, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +/// Statistics for removed backup groups pub struct BackupGroupDeleteStats { + // Count of removed groups + removed_groups: usize, // Count of protected snapshots, therefore not removed protected_snapshots: usize, // Count of deleted snapshots @@ -1594,6 +1614,10 @@ impl BackupGroupDeleteStats { self.protected_snapshots == 0 } + pub fn removed_groups(&self) -> usize { + self.removed_groups + } + pub fn removed_snapshots(&self) -> usize { self.removed_snapshots } @@ -1602,6 +1626,16 @@ impl BackupGroupDeleteStats { self.protected_snapshots } + pub fn add(&mut self, rhs: &Self) { + self.removed_groups += rhs.removed_groups; + self.protected_snapshots += rhs.protected_snapshots; + self.removed_snapshots += rhs.removed_snapshots; + } + + pub fn increment_removed_groups(&mut self) { + self.removed_groups += 1; + } + pub fn increment_removed_snapshots(&mut self) { self.removed_snapshots += 1; } From ba850a25a32ab265a8a52e06f71464adb4aa4049 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 11 Nov 2024 16:43:36 +0100 Subject: [PATCH 273/299] api/api-types: refactor api endpoint version, add api 
types Add a dedicated api type for the `version` api endpoint and helper methods for supported feature comparison. This will be used to detect api incompatibility of older hosts, not supporting some features. Use the new api type to refactor the version endpoint and set it as return type. Signed-off-by: Christian Ebner --- pbs-api-types/src/lib.rs | 3 ++ pbs-api-types/src/version.rs | 88 ++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 pbs-api-types/src/version.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 460c7da7..6bae4a52 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -155,6 +155,9 @@ pub use zfs::*; mod metrics; pub use metrics::*; +mod version; +pub use version::*; + const_regex! { // just a rough check - dummy acceptor is used before persisting pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$"; diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs new file mode 100644 index 00000000..7a4c6cb7 --- /dev/null +++ b/pbs-api-types/src/version.rs @@ -0,0 +1,88 @@ +//! 
Defines the types for the api version info endpoint +use std::convert::TryFrom; + +use anyhow::{format_err, Context}; + +use proxmox_schema::api; + +#[api( + description: "Api version information", + properties: { + "version": { + description: "Version 'major.minor'", + type: String, + }, + "release": { + description: "Version release", + type: String, + }, + "repoid": { + description: "Version repository id", + type: String, + }, + "features": { + description: "List of supported features", + type: Array, + items: { + type: String, + description: "Feature id", + }, + }, + } +)] +#[derive(serde::Deserialize, serde::Serialize)] +pub struct ApiVersionInfo { + pub version: String, + pub release: String, + pub repoid: String, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub features: Vec, +} + +pub type ApiVersionMajor = u64; +pub type ApiVersionMinor = u64; +pub type ApiVersionRelease = u64; + +pub struct ApiVersion { + pub major: ApiVersionMajor, + pub minor: ApiVersionMinor, + pub release: ApiVersionRelease, + pub repoid: String, + pub features: Vec, +} + +impl TryFrom for ApiVersion { + type Error = anyhow::Error; + + fn try_from(value: ApiVersionInfo) -> Result { + let (major, minor) = value + .version + .split_once('.') + .ok_or_else(|| format_err!("malformed API version {}", value.version))?; + + let major: ApiVersionMajor = major + .parse() + .with_context(|| "failed to parse major version")?; + let minor: ApiVersionMinor = minor + .parse() + .with_context(|| "failed to parse minor version")?; + let release: ApiVersionRelease = value + .release + .parse() + .with_context(|| "failed to parse release version")?; + + Ok(Self { + major, + minor, + release, + repoid: value.repoid.clone(), + features: value.features.clone(), + }) + } +} + +impl ApiVersion { + pub fn supports_feature(&self, feature: &str) -> bool { + self.features.iter().any(|f| f == feature) + } +} From 2f4c9f784e70000fb386fa82dc3058246a169cb7 Mon Sep 17 00:00:00 2001 From: Christian 
Ebner Date: Mon, 11 Nov 2024 16:43:38 +0100 Subject: [PATCH 274/299] api types/config: add `sync-push` config type for push sync jobs In order for sync jobs to be either pull or push jobs, allow to configure the direction of the job. Adds an additional config type `sync-push` to the sync job config, to clearly distinguish sync jobs configured in pull and in push direction and defines and implements the required `SyncDirection` api type. This approach was chosen in order to limit possible misconfiguration, as unintentionally switching the sync direction could potentially delete still required snapshots. Signed-off-by: Christian Ebner --- pbs-api-types/src/jobs.rs | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index bf7a6bd5..e8056beb 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -498,6 +498,44 @@ pub const TRANSFER_LAST_SCHEMA: Schema = .minimum(1) .schema(); +#[api()] +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Direction of the sync job, push or pull +pub enum SyncDirection { + /// Sync direction pull + #[default] + Pull, + /// Sync direction push + Push, +} + +impl std::fmt::Display for SyncDirection { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SyncDirection::Pull => f.write_str("pull"), + SyncDirection::Push => f.write_str("push"), + } + } +} + +impl SyncDirection { + pub fn as_config_type_str(&self) -> &'static str { + match self { + SyncDirection::Pull => "sync", + SyncDirection::Push => "sync-push", + } + } + + pub fn from_config_type_str(config_type: &str) -> Result { + match config_type { + "sync" => Ok(SyncDirection::Pull), + "sync-push" => Ok(SyncDirection::Push), + _ => bail!("invalid config type for sync job"), + } + } +} + #[api( properties: { id: { From fda1f99479e2b86cefa3f8d86378d0b7161f05f3 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Thu, 21 Nov 2024 10:13:38 +0100 Subject: [PATCH 275/299] version: remove named features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and use version comparison for the push code that previously used it. Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/version.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs index 7a4c6cb7..80f87e37 100644 --- a/pbs-api-types/src/version.rs +++ b/pbs-api-types/src/version.rs @@ -20,14 +20,6 @@ use proxmox_schema::api; description: "Version repository id", type: String, }, - "features": { - description: "List of supported features", - type: Array, - items: { - type: String, - description: "Feature id", - }, - }, } )] #[derive(serde::Deserialize, serde::Serialize)] @@ -35,8 +27,6 @@ pub struct ApiVersionInfo { pub version: String, pub release: String, pub repoid: String, - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub features: Vec, } pub type ApiVersionMajor = u64; @@ -48,7 +38,6 @@ pub struct ApiVersion { pub minor: ApiVersionMinor, pub release: ApiVersionRelease, pub repoid: String, - pub features: Vec, } impl TryFrom for ApiVersion { @@ -76,13 +65,6 @@ impl TryFrom for ApiVersion { minor, release, repoid: value.repoid.clone(), - features: value.features.clone(), }) } } - -impl ApiVersion { - pub fn supports_feature(&self, feature: &str) -> bool { - self.features.iter().any(|f| f == feature) - } -} From a2773ddd799fbc1e628b07ee1f1468509f8ff5b6 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Fri, 22 Nov 2024 11:30:07 +0100 Subject: [PATCH 276/299] datastore: move `ArchiveType` to api types Moving the `ArchiveType` to avoid crate dependencies on `pbs-datastore`. 
In preparation for introducing a dedicated `BackupArchiveName` api type, allowing to set the corresponding archive type variant when parsing the archive name based on it's filename. Signed-off-by: Christian Ebner --- pbs-api-types/src/datastore.rs | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 3d2b0eab..3b9c206d 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use anyhow::{bail, format_err, Error}; use const_format::concatcp; @@ -1644,3 +1644,24 @@ impl BackupGroupDeleteStats { self.protected_snapshots += 1; } } + +#[derive(PartialEq, Eq)] +/// Allowed variants of backup archives to be contained in a snapshot's manifest +pub enum ArchiveType { + FixedIndex, + DynamicIndex, + Blob, +} + +impl ArchiveType { + pub fn from_path(archive_name: impl AsRef) -> Result { + let archive_name = archive_name.as_ref(); + let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) { + Some("didx") => ArchiveType::DynamicIndex, + Some("fidx") => ArchiveType::FixedIndex, + Some("blob") => ArchiveType::Blob, + _ => bail!("unknown archive type: {archive_name:?}"), + }; + Ok(archive_type) + } +} From 0d66acd390272e9cfc7e90ccbc74f1dd6f570eac Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Fri, 22 Nov 2024 11:30:08 +0100 Subject: [PATCH 277/299] api types: introduce `BackupArchiveName` type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces a dedicated wrapper type to be used for backup archive names instead of plain strings and associated helper methods for archive type checks and archive name mappings. 
Signed-off-by: Christian Ebner FG: use LazyLock for constant archive names reduces churn, and saves some allocations Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 147 ++++++++++++++++++++++++++++++++- 1 file changed, 146 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 3b9c206d..458fbf84 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,5 +1,8 @@ +use std::convert::{AsRef, TryFrom}; use std::fmt; use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::LazyLock; use anyhow::{bail, format_err, Error}; use const_format::concatcp; @@ -1645,7 +1648,7 @@ impl BackupGroupDeleteStats { } } -#[derive(PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] /// Allowed variants of backup archives to be contained in a snapshot's manifest pub enum ArchiveType { FixedIndex, @@ -1664,4 +1667,146 @@ impl ArchiveType { }; Ok(archive_type) } + + pub fn extension(&self) -> &'static str { + match self { + ArchiveType::DynamicIndex => "didx", + ArchiveType::FixedIndex => "fidx", + ArchiveType::Blob => "blob", + } + } +} + +#[derive(Clone, PartialEq, Eq)] +/// Name of archive files contained in snapshot's manifest +pub struct BackupArchiveName { + // archive name including the `.fidx`, `.didx` or `.blob` archive type extension + name: String, + // archive type parsed based on given extension + ty: ArchiveType, +} + +pub static MANIFEST_BLOB_NAME: LazyLock = LazyLock::new(|| BackupArchiveName { + name: "index.json.blob".to_string(), + ty: ArchiveType::Blob, +}); + +pub static CATALOG_NAME: LazyLock = LazyLock::new(|| BackupArchiveName { + name: "catalog.pcat1.didx".to_string(), + ty: ArchiveType::DynamicIndex, +}); + +pub static CLIENT_LOG_BLOB_NAME: LazyLock = + LazyLock::new(|| BackupArchiveName { + name: "client.log.blob".to_string(), + ty: ArchiveType::Blob, + }); + +pub static ENCRYPTED_KEY_BLOB_NAME: LazyLock = + LazyLock::new(|| BackupArchiveName { 
+ name: "rsa-encrypted.key.blob".to_string(), + ty: ArchiveType::Blob, + }); + +impl fmt::Display for BackupArchiveName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{name}", name = self.name) + } +} + +serde_plain::derive_deserialize_from_fromstr!(BackupArchiveName, "archive name"); + +impl FromStr for BackupArchiveName { + type Err = Error; + + fn from_str(name: &str) -> Result { + Self::try_from(name) + } +} + +serde_plain::derive_serialize_from_display!(BackupArchiveName); + +impl TryFrom<&str> for BackupArchiveName { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result { + let (name, ty) = Self::parse_archive_type(value)?; + Ok(Self { name, ty }) + } +} + +impl AsRef for BackupArchiveName { + fn as_ref(&self) -> &str { + &self.name + } +} + +impl BackupArchiveName { + pub fn from_path(path: impl AsRef) -> Result { + let path = path.as_ref(); + if path.as_os_str().as_encoded_bytes().last() == Some(&b'/') { + bail!("invalid archive name, got directory"); + } + let file_name = path + .file_name() + .ok_or_else(|| format_err!("invalid archive name"))?; + let name = file_name + .to_str() + .ok_or_else(|| format_err!("archive name not valid UTF-8"))?; + + Self::try_from(name) + } + + pub fn archive_type(&self) -> ArchiveType { + self.ty.clone() + } + + pub fn ends_with(&self, postfix: &str) -> bool { + self.name.ends_with(postfix) + } + + pub fn has_pxar_filename_extension(&self) -> bool { + self.name.ends_with(".pxar.didx") + || self.name.ends_with(".mpxar.didx") + || self.name.ends_with(".ppxar.didx") + } + + pub fn without_type_extension(&self) -> String { + self.name + .strip_suffix(&format!(".{ext}", ext = self.ty.extension())) + .unwrap() + .into() + } + + fn parse_archive_type(archive_name: &str) -> Result<(String, ArchiveType), Error> { + // Detect archive type via given server archive name type extension, if present + if let Ok(archive_type) = ArchiveType::from_path(archive_name) { + return 
Ok((archive_name.into(), archive_type)); + } + + // No server archive name type extension in archive name, map based on extension + let archive_type = match Path::new(archive_name) + .extension() + .and_then(|ext| ext.to_str()) + { + Some("pxar") => ArchiveType::DynamicIndex, + Some("mpxar") => ArchiveType::DynamicIndex, + Some("ppxar") => ArchiveType::DynamicIndex, + Some("pcat1") => ArchiveType::DynamicIndex, + Some("img") => ArchiveType::FixedIndex, + Some("json") => ArchiveType::Blob, + Some("key") => ArchiveType::Blob, + Some("log") => ArchiveType::Blob, + _ => bail!("failed to parse archive type for '{archive_name}'"), + }; + + Ok(( + format!("{archive_name}.{ext}", ext = archive_type.extension()), + archive_type, + )) + } +} + +impl ApiType for BackupArchiveName { + const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; } From 5a22076e677c3d73d92d609e953b18bb4e71ace1 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Fri, 22 Nov 2024 11:30:11 +0100 Subject: [PATCH 278/299] api types: add unit tests for backup archive name parsing Signed-off-by: Christian Ebner --- pbs-api-types/src/datastore.rs | 64 ++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 458fbf84..666797fc 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1810,3 +1810,67 @@ impl BackupArchiveName { impl ApiType for BackupArchiveName { const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_invalid_backup_archive_names() { + let invalid_archive_names = ["/invalid/", "/invalid/..", "/invalid/archive-name.invalid"]; + + for archive_name in invalid_archive_names { + assert!(BackupArchiveName::from_path(archive_name).is_err()); + } + } + + #[test] + fn test_valid_didx_backup_archive_names() { + let valid_archive_names = [ + "/valid/archive-name.pxar", + "/valid/archive-name.pxar.didx", + 
"/valid/archive-name.mpxar", + "/valid/archive-name.mpxar.didx", + "/valid/archive-name.ppxar", + "/valid/archive-name.ppxar.didx", + "/valid/archive-name.pcat1", + "/valid/archive-name.pcat1.didx", + ]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref().ends_with(".didx")); + assert!(archive.archive_type() == ArchiveType::DynamicIndex); + } + } + + #[test] + fn test_valid_fidx_backup_archive_names() { + let valid_archive_names = ["/valid/archive-name.img", "/valid/archive-name.img.fidx"]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref() == "archive-name.img.fidx"); + assert!(archive.without_type_extension() == "archive-name.img"); + assert!(archive.archive_type() == ArchiveType::FixedIndex); + } + } + + #[test] + fn test_valid_blob_backup_archive_names() { + let valid_archive_names = [ + "/valid/index.json", + "/valid/index.json.blob", + "/valid/rsa-encrypted.key", + "/valid/rsa-encrypted.key.blob", + "/valid/archive-name.log", + "/valid/archive-name.log.blob", + ]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref().ends_with(".blob")); + assert!(archive.archive_type() == ArchiveType::Blob); + } + } +} From 916c46905b38ea0f21afb49457f4d4af93bbefd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Fri, 22 Nov 2024 12:07:10 +0100 Subject: [PATCH 279/299] api types: extend backup archive name parsing tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and also test the error triggered by a directory path being passed in. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 666797fc..711051d0 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1817,7 +1817,13 @@ mod tests { #[test] fn test_invalid_backup_archive_names() { - let invalid_archive_names = ["/invalid/", "/invalid/..", "/invalid/archive-name.invalid"]; + let invalid_archive_names = [ + "/invalid/", + "/invalid/archive-name.pxar/", + "/invalid/archive-name.pxar.didx/", + "/invalid/..", + "/invalid/archive-name.invalid", + ]; for archive_name in invalid_archive_names { assert!(BackupArchiveName::from_path(archive_name).is_err()); From 32969b47e13e2d6cf4f518926318152c4731eb0b Mon Sep 17 00:00:00 2001 From: Gabriel Goller Date: Fri, 22 Nov 2024 13:16:15 +0100 Subject: [PATCH 280/299] fix #3786: api: add resync-corrupt option to sync jobs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This option allows us to "fix" corrupt snapshots (and/or their chunks) by pulling them from another remote. When traversing the remote snapshots, we check if it exists locally, and if it is, we check if the last verification of it failed. If the local snapshot is broken and the `resync-corrupt` option is turned on, we pull in the remote snapshot, overwriting the local one. This is very useful and has been requested a lot, as there is currently no way to "fix" corrupt chunks/snapshots even if the user has a healthy version of it on their offsite instance. 
Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller Reviewed-by: Fabian Grünbichler --- pbs-api-types/src/jobs.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e8056beb..52520811 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -536,6 +536,10 @@ impl SyncDirection { } } +pub const RESYNC_CORRUPT_SCHEMA: Schema = + BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") + .schema(); + #[api( properties: { id: { @@ -590,6 +594,10 @@ impl SyncDirection { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + } } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -623,6 +631,8 @@ pub struct SyncJobConfig { pub limit: RateLimitConfig, #[serde(skip_serializing_if = "Option::is_none")] pub transfer_last: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub resync_corrupt: Option, } impl SyncJobConfig { From 85256a6b6cd67899e1c6ec81d12860a56399e9e6 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Tue, 12 Nov 2024 11:43:13 +0100 Subject: [PATCH 281/299] api-types: implement dedicated api type for match patterns Introduces a dedicated api type `PathPattern` and the corresponding format and input validation schema. Further, add a `PathPatterns` type for collections of path patterns and implement required traits to be able to replace currently defined api parameters. In preparation for using this common api type for all api endpoints exposing a match pattern parameter. 
Signed-off-by: Christian Ebner --- pbs-api-types/src/lib.rs | 3 ++ pbs-api-types/src/pathpatterns.rs | 55 +++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 pbs-api-types/src/pathpatterns.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 6bae4a52..e73f1f8a 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -143,6 +143,9 @@ pub use ad::*; mod remote; pub use remote::*; +mod pathpatterns; +pub use pathpatterns::*; + mod tape; pub use tape::*; diff --git a/pbs-api-types/src/pathpatterns.rs b/pbs-api-types/src/pathpatterns.rs new file mode 100644 index 00000000..c40926a4 --- /dev/null +++ b/pbs-api-types/src/pathpatterns.rs @@ -0,0 +1,55 @@ +use proxmox_schema::{const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema}; + +use serde::{Deserialize, Serialize}; + +const_regex! { + pub PATH_PATTERN_REGEX = concat!(r"^.+[^\\]$"); +} + +pub const PATH_PATTERN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PATH_PATTERN_REGEX); + +pub const PATH_PATTERN_SCHEMA: Schema = + StringSchema::new("Path or match pattern for matching filenames.") + .format(&PATH_PATTERN_FORMAT) + .schema(); + +pub const PATH_PATTERN_LIST_SCHEMA: Schema = ArraySchema::new( + "List of paths or match patterns for matching filenames.", + &PATH_PATTERN_SCHEMA, +) +.schema(); + +#[derive(Default, Deserialize, Serialize)] +/// Path or path pattern for filename matching +pub struct PathPattern { + pattern: String, +} + +impl ApiType for PathPattern { + const API_SCHEMA: Schema = PATH_PATTERN_SCHEMA; +} + +impl AsRef<[u8]> for PathPattern { + fn as_ref(&self) -> &[u8] { + self.pattern.as_bytes() + } +} + +#[derive(Default, Deserialize, Serialize)] +/// Array of paths and/or path patterns for filename matching +pub struct PathPatterns { + patterns: Vec, +} + +impl ApiType for PathPatterns { + const API_SCHEMA: Schema = PATH_PATTERN_LIST_SCHEMA; +} + +impl IntoIterator for PathPatterns { + type Item = PathPattern; 
+ type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.patterns.into_iter() + } +} From 6e3c5afce51860e58c8debfd356a5342747ea687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Mon, 25 Nov 2024 12:02:09 +0100 Subject: [PATCH 282/299] api types: replace PathPatterns with Vec MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PathPatterns is hard to distinguish from PathPattern, so would need to be renamed anyway.. but there isn't really a reason to define a separate API type just for this. Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/pathpatterns.rs | 27 +-------------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/pbs-api-types/src/pathpatterns.rs b/pbs-api-types/src/pathpatterns.rs index c40926a4..505ecc8a 100644 --- a/pbs-api-types/src/pathpatterns.rs +++ b/pbs-api-types/src/pathpatterns.rs @@ -1,4 +1,4 @@ -use proxmox_schema::{const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema}; +use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema}; use serde::{Deserialize, Serialize}; @@ -13,12 +13,6 @@ pub const PATH_PATTERN_SCHEMA: Schema = .format(&PATH_PATTERN_FORMAT) .schema(); -pub const PATH_PATTERN_LIST_SCHEMA: Schema = ArraySchema::new( - "List of paths or match patterns for matching filenames.", - &PATH_PATTERN_SCHEMA, -) -.schema(); - #[derive(Default, Deserialize, Serialize)] /// Path or path pattern for filename matching pub struct PathPattern { @@ -34,22 +28,3 @@ impl AsRef<[u8]> for PathPattern { self.pattern.as_bytes() } } - -#[derive(Default, Deserialize, Serialize)] -/// Array of paths and/or path patterns for filename matching -pub struct PathPatterns { - patterns: Vec, -} - -impl ApiType for PathPatterns { - const API_SCHEMA: Schema = PATH_PATTERN_LIST_SCHEMA; -} - -impl IntoIterator for PathPatterns { - type Item = PathPattern; - type IntoIter = std::vec::IntoIter; - - fn 
into_iter(self) -> Self::IntoIter { - self.patterns.into_iter() - } -} From b51b0be153a6f4e9028909f29822d6b8bcad7c91 Mon Sep 17 00:00:00 2001 From: Shannon Sterz Date: Fri, 4 Oct 2024 15:40:53 +0200 Subject: [PATCH 283/299] api: enforce minimum character limit of 8 on new passwords we already have two different password schemas, `PBS_PASSWORD_SCHEMA` being the stricter one, which ensures a minimum length of new passwords. however, this wasn't used on the change password endpoint before, so add it there too. this is also in-line with NIST's latest recommendations [1]. [1]: https://pages.nist.gov/800-63-4/sp800-63b.html#passwordver Signed-off-by: Shannon Sterz --- pbs-api-types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index e73f1f8a..acc2fca3 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -214,7 +214,7 @@ pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.") .format(&PASSWORD_FORMAT) - .min_length(5) + .min_length(8) .max_length(64) .schema(); From d291f6723619c4221c94b1caac7ecf50b798ab7e Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Mon, 25 Nov 2024 17:21:48 +0100 Subject: [PATCH 284/299] pbs-api-types: add backing-device to DataStoreConfig Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 711051d0..8827604c 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -45,7 +45,7 @@ const_regex! 
{ pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); -pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") +pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or a relative on-device path for removable datastores.") .min_length(1) .max_length(4096) .schema(); @@ -163,6 +163,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .minimum(1) .schema(); +/// Base directory where datastores are mounted +pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; + #[api] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -237,7 +240,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore schema: DATASTORE_SCHEMA, }, path: { - schema: DIR_NAME_SCHEMA, + schema: DATASTORE_DIR_NAME_SCHEMA, }, "notify-user": { optional: true, @@ -276,6 +279,12 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), type: String, }, + "backing-device": { + description: "The UUID of the filesystem partition for removable datastores.", + optional: true, + format: &proxmox_schema::api_types::UUID_FORMAT, + type: String, + } } )] #[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] @@ -323,6 +332,11 @@ pub struct DataStoreConfig { /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " #[serde(skip_serializing_if = "Option::is_none")] pub maintenance_mode: Option, + + /// The UUID of the device(for removable datastores) + #[updater(skip)] + #[serde(skip_serializing_if = "Option::is_none")] + pub backing_device: Option, } #[api] @@ -357,12 +371,17 @@ impl DataStoreConfig { notification_mode: None, tuning: None, maintenance_mode: None, + backing_device: None, } } /// Returns the absolute path to the datastore content. 
pub fn absolute_path(&self) -> String { - self.path.clone() + if self.backing_device.is_some() { + format!("{DATASTORE_MOUNT_DIR}/{}", self.name) + } else { + self.path.clone() + } } pub fn get_maintenance_mode(&self) -> Option { From 6134a73b1e4cb31eabbe42f424985d81a0e65156 Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Mon, 25 Nov 2024 17:21:49 +0100 Subject: [PATCH 285/299] maintenance: make is_offline more generic ... and add MaintenanceType::Delete to it. We also want to clear any cach entries if we are deleting the datastore, not just if it is marked as offline. Signed-off-by: Hannes Laimer --- pbs-api-types/src/maintenance.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 1e3413dc..a7b8b078 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -77,10 +77,9 @@ pub struct MaintenanceMode { } impl MaintenanceMode { - /// Used for deciding whether the datastore is cleared from the internal cache after the last - /// task finishes, so all open files are closed. 
- pub fn is_offline(&self) -> bool { - self.ty == MaintenanceType::Offline + /// Used for deciding whether the datastore is cleared from the internal cache + pub fn clear_from_cache(&self) -> bool { + self.ty == MaintenanceType::Offline || self.ty == MaintenanceType::Delete } pub fn check(&self, operation: Option) -> Result<(), Error> { From fd1f8413f7f79a309b044f68ecd9ead8f3eaf6da Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 25 Nov 2024 17:21:50 +0100 Subject: [PATCH 286/299] maintenance: add 'Unmount' maintenance type Signed-off-by: Dietmar Maurer Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 3 +++ pbs-api-types/src/maintenance.rs | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 8827604c..9bcec719 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -401,6 +401,9 @@ impl DataStoreConfig { match current_type { Some(MaintenanceType::ReadOnly) => { /* always OK */ } Some(MaintenanceType::Offline) => { /* always OK */ } + Some(MaintenanceType::Unmount) => { + bail!("datastore is being unmounted"); + } Some(MaintenanceType::Delete) => { match new_type { Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ } diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index a7b8b078..3c9aa819 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -38,7 +38,6 @@ pub enum Operation { /// Maintenance type. pub enum MaintenanceType { // TODO: - // - Add "unmounting" once we got pluggable datastores // - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate // operation, so that one can enable a mode where nothing new can be added but stuff can be // cleaned @@ -48,6 +47,8 @@ pub enum MaintenanceType { Offline, /// The datastore is being deleted. Delete, + /// The (removable) datastore is being unmounted. 
+ Unmount, } serde_plain::derive_display_from_serialize!(MaintenanceType); serde_plain::derive_fromstr_from_deserialize!(MaintenanceType); @@ -79,7 +80,9 @@ pub struct MaintenanceMode { impl MaintenanceMode { /// Used for deciding whether the datastore is cleared from the internal cache pub fn clear_from_cache(&self) -> bool { - self.ty == MaintenanceType::Offline || self.ty == MaintenanceType::Delete + self.ty == MaintenanceType::Offline + || self.ty == MaintenanceType::Delete + || self.ty == MaintenanceType::Unmount } pub fn check(&self, operation: Option) -> Result<(), Error> { @@ -93,6 +96,8 @@ impl MaintenanceMode { if let Some(Operation::Lookup) = operation { return Ok(()); + } else if self.ty == MaintenanceType::Unmount { + bail!("datastore is being unmounted"); } else if self.ty == MaintenanceType::Offline { bail!("offline maintenance mode: {}", message); } else if self.ty == MaintenanceType::ReadOnly { From 35fb5d4f7fb0e9f21fd4bb39faaa094e18529e3f Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Mon, 25 Nov 2024 17:21:55 +0100 Subject: [PATCH 287/299] pbs-api-types: add mount_status field to DataStoreListItem Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 9bcec719..4927f372 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -178,6 +178,20 @@ pub enum ChunkOrder { Inode, } +#[api] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Current mounting status of a datastore, useful for removable datastores. +pub enum DataStoreMountStatus { + /// Removable datastore is currently mounted correctly. + Mounted, + /// Removable datastore is currebtly not mounted. + NotMounted, + /// Datastore is not removable, so there is no mount status. 
+ #[default] + NonRemovable, +} + #[api] #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -451,6 +465,7 @@ impl DataStoreConfig { pub struct DataStoreListItem { pub store: String, pub comment: Option, + pub mount_status: DataStoreMountStatus, /// If the datastore is in maintenance mode, information about it #[serde(skip_serializing_if = "Option::is_none")] pub maintenance: Option, @@ -1456,6 +1471,7 @@ pub struct DataStoreStatusListItem { /// The available bytes of the underlying storage. (-1 on error) #[serde(skip_serializing_if = "Option::is_none")] pub avail: Option, + pub mount_status: DataStoreMountStatus, /// A list of usages of the past (last Month). #[serde(skip_serializing_if = "Option::is_none")] pub history: Option>>, @@ -1480,12 +1496,13 @@ pub struct DataStoreStatusListItem { } impl DataStoreStatusListItem { - pub fn empty(store: &str, err: Option) -> Self { + pub fn empty(store: &str, err: Option, mount_status: DataStoreMountStatus) -> Self { DataStoreStatusListItem { store: store.to_owned(), total: None, used: None, avail: None, + mount_status, history: None, history_start: None, history_delta: None, From 2ed9c4bfca028ea1f201797b7e1ab3323628c637 Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Tue, 26 Nov 2024 12:43:19 +0100 Subject: [PATCH 288/299] api: maintenance: allow setting of maintenance mode if 'unmounting' So it is possible to reset it after a failed unmount, or abort an unmount task by resetting it through the API. 
Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 4927f372..203e75e3 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -416,7 +416,7 @@ impl DataStoreConfig { Some(MaintenanceType::ReadOnly) => { /* always OK */ } Some(MaintenanceType::Offline) => { /* always OK */ } Some(MaintenanceType::Unmount) => { - bail!("datastore is being unmounted"); + /* used to reset it after failed unmount, or alternative for aborting unmount task */ } Some(MaintenanceType::Delete) => { match new_type { From f1f8c65c70dbfdececdef6f91a245cbac6f00968 Mon Sep 17 00:00:00 2001 From: Hannes Laimer Date: Tue, 26 Nov 2024 12:43:21 +0100 Subject: [PATCH 289/299] api: types: add 'mount_status' to schema ... and deserialize with default if field is missing in data. Reported-by: Aaron Lauterer Fixes: 35fb5d4f7f ("pbs-api-types: add mount_status field to DataStoreListItem") Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 203e75e3..90f1195b 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -452,6 +452,9 @@ impl DataStoreConfig { optional: true, schema: SINGLE_LINE_COMMENT_SCHEMA, }, + "mount-status": { + type: DataStoreMountStatus, + }, maintenance: { optional: true, format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), @@ -465,6 +468,7 @@ impl DataStoreConfig { pub struct DataStoreListItem { pub store: String, pub comment: Option, + #[serde(default)] pub mount_status: DataStoreMountStatus, /// If the datastore is in maintenance mode, information about it #[serde(skip_serializing_if = "Option::is_none")] @@ -1447,6 +1451,9 @@ pub struct DataStoreStatus { store: { schema: DATASTORE_SCHEMA, }, + "mount-status": { + type: 
DataStoreMountStatus, + }, history: { type: Array, optional: true, @@ -1471,6 +1478,7 @@ pub struct DataStoreStatusListItem { /// The available bytes of the underlying storage. (-1 on error) #[serde(skip_serializing_if = "Option::is_none")] pub avail: Option, + #[serde(default)] pub mount_status: DataStoreMountStatus, /// A list of usages of the past (last Month). #[serde(skip_serializing_if = "Option::is_none")] From bfffbef9b24f6977d0377c14d53489366a3c61e8 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Tue, 26 Nov 2024 13:24:19 +0100 Subject: [PATCH 290/299] api types: add missing conf to blob archive name mapping Commit 0d66acd3 ("api types: introduce `BackupArchiveName` type") introduced a dedicated archive name api type to add rust type checking and bundle helpers to the api type. Since this, the backup archive name to server archive name mapping is handled by its parser. This however did not cover the `.conf` extension used for VM config files. Add the missing `.conf` to `.conf.blob` to the match statement and the test cases. 
Fixes: 0d66acd3 ("api types: introduce `BackupArchiveName` type") Reported-by: Stoiko Ivanov Signed-off-by: Christian Ebner --- pbs-api-types/src/datastore.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 90f1195b..d3876838 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1841,6 +1841,7 @@ impl BackupArchiveName { Some("ppxar") => ArchiveType::DynamicIndex, Some("pcat1") => ArchiveType::DynamicIndex, Some("img") => ArchiveType::FixedIndex, + Some("conf") => ArchiveType::Blob, Some("json") => ArchiveType::Blob, Some("key") => ArchiveType::Blob, Some("log") => ArchiveType::Blob, @@ -1918,6 +1919,8 @@ mod tests { "/valid/rsa-encrypted.key.blob", "/valid/archive-name.log", "/valid/archive-name.log.blob", + "/valid/qemu-server.conf", + "/valid/qemu-server.conf.blob", ]; for archive_name in valid_archive_names { From efc45db20cecc460ffc0bb3e4410577acaac842a Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Mon, 25 Nov 2024 12:15:32 +0100 Subject: [PATCH 291/299] api: admin: sync: add direction to sync job status Signed-off-by: Dominik Csapak --- pbs-api-types/src/jobs.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 52520811..e18197fb 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -660,6 +660,9 @@ impl SyncJobConfig { status: { type: JobScheduleStatus, }, + direction: { + type: SyncDirection, + }, }, )] #[derive(Serialize, Deserialize, Clone, PartialEq)] @@ -670,6 +673,9 @@ pub struct SyncJobStatus { pub config: SyncJobConfig, #[serde(flatten)] pub status: JobScheduleStatus, + + /// The direction of the job + pub direction: SyncDirection, } /// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API From d70f6d9f093681dffbe2f8993e34026ef057e68f Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 25 Nov 2024 18:40:10 
+0100 Subject: [PATCH 292/299] api: admin/config: introduce sync direction as job config parameter Add the sync direction for the sync job as optional config parameter and refrain from using the config section type for conditional direction check, as they are now the same (see previous commit). Use the configured sync job parameter instead of passing it to the various methods as function parameter and only filter based on sync direction if an optional api parameter to distinguish/filter based on direction is given. Signed-off-by: Christian Ebner Reviewed-by: Dominik Csapak Tested-by: Dominik Csapak --- pbs-api-types/src/jobs.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e18197fb..4a85378c 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -597,7 +597,11 @@ pub const RESYNC_CORRUPT_SCHEMA: Schema = "resync-corrupt": { schema: RESYNC_CORRUPT_SCHEMA, optional: true, - } + }, + "sync-direction": { + type: SyncDirection, + optional: true, + }, } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -633,6 +637,8 @@ pub struct SyncJobConfig { pub transfer_last: Option, #[serde(skip_serializing_if = "Option::is_none")] pub resync_corrupt: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_direction: Option, } impl SyncJobConfig { From ce7239c24ab9ac7362d49a883402bf807d1b6399 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Mon, 25 Nov 2024 18:40:12 +0100 Subject: [PATCH 293/299] api types: drop unused config type helpers for sync direction Jobs for both sync directions are now stored using the same `sync` config section type, so drop the outdated helpers. 
Signed-off-by: Christian Ebner Reviewed-by: Dominik Csapak Tested-by: Dominik Csapak --- pbs-api-types/src/jobs.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 4a85378c..16b16dd8 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -519,23 +519,6 @@ impl std::fmt::Display for SyncDirection { } } -impl SyncDirection { - pub fn as_config_type_str(&self) -> &'static str { - match self { - SyncDirection::Pull => "sync", - SyncDirection::Push => "sync-push", - } - } - - pub fn from_config_type_str(config_type: &str) -> Result { - match config_type { - "sync" => Ok(SyncDirection::Pull), - "sync-push" => Ok(SyncDirection::Push), - _ => bail!("invalid config type for sync job"), - } - } -} - pub const RESYNC_CORRUPT_SCHEMA: Schema = BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") .schema(); From 7ae942f941f28cd0b36fdb4b6e3b2430993478a0 Mon Sep 17 00:00:00 2001 From: Dominik Csapak Date: Tue, 26 Nov 2024 15:47:34 +0100 Subject: [PATCH 294/299] sync jobs: remove superfluous direction property since the SyncJobConfig struct now contains a 'sync-direction' property, we can omit the 'direction' property of the SyncJobStatus struct. 
This makes a few adaptations in the ui necessary: * use the correct field * handle 'pull' as default (since we don't necessarily get a 'sync-direction' in that case) Signed-off-by: Dominik Csapak --- pbs-api-types/src/jobs.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 16b16dd8..04631d92 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -649,9 +649,6 @@ impl SyncJobConfig { status: { type: JobScheduleStatus, }, - direction: { - type: SyncDirection, - }, }, )] #[derive(Serialize, Deserialize, Clone, PartialEq)] @@ -662,9 +659,6 @@ pub struct SyncJobStatus { pub config: SyncJobConfig, #[serde(flatten)] pub status: JobScheduleStatus, - - /// The direction of the job - pub direction: SyncDirection, } /// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API From a36d3fdf295a4953c6a8d06cfc21156c0a81a156 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Wed, 27 Nov 2024 15:11:27 +0100 Subject: [PATCH 295/299] datastore: extract nesting check into helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and improve the variable naming while we are at it. this allows the check to be re-used in other code paths, like when starting a garbage collection. 
Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/datastore.rs | 39 ++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index d3876838..ddd8d3c6 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -441,6 +441,45 @@ impl DataStoreConfig { Ok(()) } + + pub fn ensure_not_nested(&self, stores: &[DataStoreConfig]) -> Result<(), Error> { + let our_absolute_path = PathBuf::from(self.absolute_path()); + let removable = self.backing_device.is_some(); + for other_store in stores { + if self == other_store { + continue; + }; + + // Relative paths must not be nested on the backing device of removable datastores + if removable && other_store.backing_device == self.backing_device { + let our_relative_path = Path::new(&self.path); + let other_relative_path = Path::new(&other_store.path); + if our_relative_path.starts_with(other_relative_path) + || other_relative_path.starts_with(our_relative_path) + { + bail!( + "paths on backing device must not be nested - {path:?} already used by '{store}'!", + path = other_relative_path, + store = other_store.name, + ); + } + } + + // No two datastores should have a nested absolute path + let other_absolute_path = PathBuf::from(other_store.absolute_path()); + if other_absolute_path.starts_with(&our_absolute_path) + || our_absolute_path.starts_with(&other_absolute_path) + { + bail!( + "nested datastores not allowed: '{}' already in {:?}", + other_store.name, + other_absolute_path, + ); + } + } + + Ok(()) + } } #[api( From 4308bb0ca936ab62564929bc7d8c15cddc2689fe Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Thu, 28 Nov 2024 17:07:19 +0100 Subject: [PATCH 296/299] api types: version: drop unused `repoid` field The `ApiVersion` type was introduced in commit ba850a25 ("api/api-types: refactor api endpoint version, add api types") including the `repoid`, added for completeness when converting from a pre-existing 
`ApiVersionInfo` instance, as returned by the `version` api endpoint. Drop the additional `repoid` field, since this is currently not used, can be obtained from the `ApiVersionInfo` as well and only hinders the implementation for easy api version comparison. Signed-off-by: Christian Ebner --- pbs-api-types/src/version.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs index 80f87e37..bd4c517d 100644 --- a/pbs-api-types/src/version.rs +++ b/pbs-api-types/src/version.rs @@ -37,7 +37,6 @@ pub struct ApiVersion { pub major: ApiVersionMajor, pub minor: ApiVersionMinor, pub release: ApiVersionRelease, - pub repoid: String, } impl TryFrom for ApiVersion { @@ -64,7 +63,6 @@ impl TryFrom for ApiVersion { major, minor, release, - repoid: value.repoid.clone(), }) } } From 00a198090338448bc3159fee8e201ccc8a2a2c86 Mon Sep 17 00:00:00 2001 From: Christian Ebner Date: Thu, 28 Nov 2024 17:07:20 +0100 Subject: [PATCH 297/299] api types: version: implement traits to allow for version comparison Derive and implement the traits to allow comparison of two `ApiVersion` instances for more direct and easy api version comparisons. Further, add some basic test cases to reduce risk of regressions. This is useful for e.g. feature compatibility checks by comparing api versions of remote instances. Example comparison: ``` api_version >= ApiVersion::new(3, 3, 0) ``` Signed-off-by: Christian Ebner --- pbs-api-types/src/version.rs | 122 +++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs index bd4c517d..09e725eb 100644 --- a/pbs-api-types/src/version.rs +++ b/pbs-api-types/src/version.rs @@ -1,4 +1,5 @@ //! 
Defines the types for the api version info endpoint +use std::cmp::Ordering; use std::convert::TryFrom; use anyhow::{format_err, Context}; @@ -33,6 +34,7 @@ pub type ApiVersionMajor = u64; pub type ApiVersionMinor = u64; pub type ApiVersionRelease = u64; +#[derive(PartialEq, Eq)] pub struct ApiVersion { pub major: ApiVersionMajor, pub minor: ApiVersionMinor, @@ -66,3 +68,123 @@ impl TryFrom for ApiVersion { }) } } + +impl PartialOrd for ApiVersion { + fn partial_cmp(&self, other: &Self) -> Option { + let ordering = match ( + self.major.cmp(&other.major), + self.minor.cmp(&other.minor), + self.release.cmp(&other.release), + ) { + (Ordering::Equal, Ordering::Equal, ordering) => ordering, + (Ordering::Equal, ordering, _) => ordering, + (ordering, _, _) => ordering, + }; + + Some(ordering) + } +} + +impl ApiVersion { + pub fn new(major: ApiVersionMajor, minor: ApiVersionMinor, release: ApiVersionRelease) -> Self { + Self { + major, + minor, + release, + } + } +} + +#[test] +fn same_level_version_comarison() { + let major_base = ApiVersion::new(2, 0, 0); + let major_less = ApiVersion::new(1, 0, 0); + let major_greater = ApiVersion::new(3, 0, 0); + + let minor_base = ApiVersion::new(2, 2, 0); + let minor_less = ApiVersion::new(2, 1, 0); + let minor_greater = ApiVersion::new(2, 3, 0); + + let release_base = ApiVersion::new(2, 2, 2); + let release_less = ApiVersion::new(2, 2, 1); + let release_greater = ApiVersion::new(2, 2, 3); + + assert!(major_base == major_base); + assert!(minor_base == minor_base); + assert!(release_base == release_base); + + assert!(major_base > major_less); + assert!(major_base >= major_less); + assert!(major_base != major_less); + + assert!(major_base < major_greater); + assert!(major_base <= major_greater); + assert!(major_base != major_greater); + + assert!(minor_base > minor_less); + assert!(minor_base >= minor_less); + assert!(minor_base != minor_less); + + assert!(minor_base < minor_greater); + assert!(minor_base <= minor_greater); + 
assert!(minor_base != minor_greater); + + assert!(release_base > release_less); + assert!(release_base >= release_less); + assert!(release_base != release_less); + + assert!(release_base < release_greater); + assert!(release_base <= release_greater); + assert!(release_base != release_greater); +} + +#[test] +fn mixed_level_version_comarison() { + let major_base = ApiVersion::new(2, 0, 0); + let major_less = ApiVersion::new(1, 0, 0); + let major_greater = ApiVersion::new(3, 0, 0); + + let minor_base = ApiVersion::new(2, 2, 0); + let minor_less = ApiVersion::new(2, 1, 0); + let minor_greater = ApiVersion::new(2, 3, 0); + + let release_base = ApiVersion::new(2, 2, 2); + let release_less = ApiVersion::new(2, 2, 1); + let release_greater = ApiVersion::new(2, 2, 3); + + assert!(major_base < minor_base); + assert!(major_base < minor_less); + assert!(major_base < minor_greater); + + assert!(major_base < release_base); + assert!(major_base < release_less); + assert!(major_base < release_greater); + + assert!(major_less < minor_base); + assert!(major_less < minor_less); + assert!(major_less < minor_greater); + + assert!(major_less < release_base); + assert!(major_less < release_less); + assert!(major_less < release_greater); + + assert!(major_greater > minor_base); + assert!(major_greater > minor_less); + assert!(major_greater > minor_greater); + + assert!(major_greater > release_base); + assert!(major_greater > release_less); + assert!(major_greater > release_greater); + + assert!(minor_base < release_base); + assert!(minor_base < release_less); + assert!(minor_base < release_greater); + + assert!(minor_greater > release_base); + assert!(minor_greater > release_less); + assert!(minor_greater > release_greater); + + assert!(minor_less < release_base); + assert!(minor_less < release_less); + assert!(minor_less < release_greater); +} From 2f3e985b5fea106b50ab5597cc96f1f41074e6b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= Date: Tue, 3 Dec 2024 11:52:00 
+0100 Subject: [PATCH 298/299] docs: escape in doc comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit warning: unclosed HTML tag `nodename` --> pbs-api-types/src/metrics.rs:224:5 | 224 | / /// Unique identifier for this metric object, for instance 'node/' 225 | | /// or 'qemu/'. | |_________________________^ | = note: `#[warn(rustdoc::invalid_html_tags)]` on by default warning: unclosed HTML tag `vmid` --> pbs-api-types/src/metrics.rs:224:5 | 224 | / /// Unique identifier for this metric object, for instance 'node/' 225 | | /// or 'qemu/'. | |_________________________^ warning: `pbs-api-types` (lib doc) generated 2 warnings Signed-off-by: Fabian Grünbichler --- pbs-api-types/src/metrics.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs index 26266529..014e28e4 100644 --- a/pbs-api-types/src/metrics.rs +++ b/pbs-api-types/src/metrics.rs @@ -221,8 +221,8 @@ pub struct Metrics { /// Metric data point #[derive(Clone, Debug, Deserialize, Serialize)] pub struct MetricDataPoint { - /// Unique identifier for this metric object, for instance 'node/' - /// or 'qemu/'. + /// Unique identifier for this metric object, for instance `node/` + /// or `qemu/`. pub id: String, /// Name of the metric. 
From 5a7993beb0ca98ed85b3867addb1051c9ef3c486 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 15 Jan 2025 13:03:42 +0100 Subject: [PATCH 299/299] update to proxmox-schema 4 Signed-off-by: Wolfgang Bumiller --- pbs-api-types/src/traffic_control.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index 0da327f2..c68f4637 100644 --- a/pbs-api-types/src/traffic_control.rs +++ b/pbs-api-types/src/traffic_control.rs @@ -68,17 +68,15 @@ impl RateLimitConfig { } } -const CLIENT_RATE_LIMIT_SCHEMA: Schema = StringSchema { - description: "Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", - ..*HumanByte::API_SCHEMA.unwrap_string_schema() -} -.schema(); +const CLIENT_RATE_LIMIT_SCHEMA: Schema = HumanByte::API_SCHEMA + .unwrap_string_schema_cloned() + .description("Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).") + .schema(); -const CLIENT_BURST_SCHEMA: Schema = StringSchema { - description: "Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", - ..*HumanByte::API_SCHEMA.unwrap_string_schema() -} -.schema(); +const CLIENT_BURST_SCHEMA: Schema = HumanByte::API_SCHEMA + .unwrap_string_schema_cloned() + .description("Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).") + .schema(); #[api( properties: {