diff --git a/Cargo.toml b/Cargo.toml index adeeb6ef..62f91553 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,6 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"] [workspace] members = [ - "pbs-api-types", "pbs-buildcfg", "pbs-client", "pbs-config", @@ -91,6 +90,7 @@ proxmox-tfa = { version = "5", features = [ "api", "api-types" ] } proxmox-time = "2" proxmox-uuid = "1" proxmox-worker-task = "0.1" +pbs-api-types = "0.2.0" # other proxmox crates pathpatterns = "0.3" @@ -98,7 +98,6 @@ proxmox-acme = "0.5.3" pxar = "0.12.1" # PBS workspace -pbs-api-types = { path = "pbs-api-types" } pbs-buildcfg = { path = "pbs-buildcfg" } pbs-client = { path = "pbs-client" } pbs-config = { path = "pbs-config" } @@ -236,13 +235,13 @@ proxmox-tfa.workspace = true proxmox-time.workspace = true proxmox-uuid.workspace = true proxmox-worker-task.workspace = true +pbs-api-types.workspace = true # in their respective repo proxmox-acme.workspace = true pxar.workspace = true # proxmox-backup workspace/internal crates -pbs-api-types.workspace = true pbs-buildcfg.workspace = true pbs-client.workspace = true pbs-config.workspace = true diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml deleted file mode 100644 index 17c946fe..00000000 --- a/pbs-api-types/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "pbs-api-types" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -description = "general API type helpers for PBS" - -[dependencies] -anyhow.workspace = true -const_format.workspace = true -hex.workspace = true -percent-encoding.workspace = true -regex.workspace = true -serde.workspace = true -serde_plain.workspace = true - -proxmox-auth-api = { workspace = true, features = [ "api-types" ] } -proxmox-apt-api-types.workspace = true -proxmox-human-byte.workspace = true -proxmox-lang.workspace=true -proxmox-schema = { workspace = true, features = [ "api-macro" ] } -proxmox-serde.workspace = true -proxmox-time.workspace = true -proxmox-uuid = { workspace = true, features = [ "serde" ] } diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs deleted file mode 100644 index e2f97f06..00000000 --- a/pbs-api-types/src/acl.rs +++ /dev/null @@ -1,332 +0,0 @@ -use std::str::FromStr; - -use const_format::concatcp; -use serde::de::{value, IntoDeserializer}; -use serde::{Deserialize, Serialize}; - -use proxmox_lang::constnamedbitmap; -use proxmox_schema::{ - api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema, -}; - -use crate::PROXMOX_SAFE_ID_REGEX_STR; - -const_regex! { - pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$"); -} - -// define Privilege bitfield - -constnamedbitmap! { - /// Contains a list of privilege name to privilege value mappings. - /// - /// The names are used when displaying/persisting privileges anywhere, the values are used to - /// allow easy matching of privileges as bitflags. - PRIVILEGES: u64 => { - /// Sys.Audit allows knowing about the system and its status - PRIV_SYS_AUDIT("Sys.Audit"); - /// Sys.Modify allows modifying system-level configuration - PRIV_SYS_MODIFY("Sys.Modify"); - /// Sys.Modify allows to poweroff/reboot/.. 
the system - PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement"); - - /// Datastore.Audit allows knowing about a datastore, - /// including reading the configuration entry and listing its contents - PRIV_DATASTORE_AUDIT("Datastore.Audit"); - /// Datastore.Allocate allows creating or deleting datastores - PRIV_DATASTORE_ALLOCATE("Datastore.Allocate"); - /// Datastore.Modify allows modifying a datastore and its contents - PRIV_DATASTORE_MODIFY("Datastore.Modify"); - /// Datastore.Read allows reading arbitrary backup contents - PRIV_DATASTORE_READ("Datastore.Read"); - /// Allows verifying a datastore - PRIV_DATASTORE_VERIFY("Datastore.Verify"); - - /// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots, - /// but also requires backup ownership - PRIV_DATASTORE_BACKUP("Datastore.Backup"); - /// Datastore.Prune allows deleting snapshots, - /// but also requires backup ownership - PRIV_DATASTORE_PRUNE("Datastore.Prune"); - - /// Permissions.Modify allows modifying ACLs - PRIV_PERMISSIONS_MODIFY("Permissions.Modify"); - - /// Remote.Audit allows reading remote.cfg and sync.cfg entries - PRIV_REMOTE_AUDIT("Remote.Audit"); - /// Remote.Modify allows modifying remote.cfg - PRIV_REMOTE_MODIFY("Remote.Modify"); - /// Remote.Read allows reading data from a configured `Remote` - PRIV_REMOTE_READ("Remote.Read"); - /// Remote.DatastoreBackup allows creating new snapshots on remote datastores - PRIV_REMOTE_DATASTORE_BACKUP("Remote.DatastoreBackup"); - /// Remote.DatastoreModify allows to modify remote datastores - PRIV_REMOTE_DATASTORE_MODIFY("Remote.DatastoreModify"); - /// Remote.DatastorePrune allows deleting snapshots on remote datastores - PRIV_REMOTE_DATASTORE_PRUNE("Remote.DatastorePrune"); - - /// Sys.Console allows access to the system's console - PRIV_SYS_CONSOLE("Sys.Console"); - - /// Tape.Audit allows reading tape backup configuration and status - PRIV_TAPE_AUDIT("Tape.Audit"); - /// Tape.Modify allows modifying tape backup configuration - PRIV_TAPE_MODIFY("Tape.Modify"); - /// Tape.Write allows writing tape media - PRIV_TAPE_WRITE("Tape.Write"); - /// Tape.Read allows reading tape backup configuration and media contents - PRIV_TAPE_READ("Tape.Read"); - - /// Realm.Allocate allows viewing, creating, modifying and deleting realms - PRIV_REALM_ALLOCATE("Realm.Allocate"); - } -} - -pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> { - PRIVILEGES - .iter() - .fold(Vec::new(), |mut priv_names, (name, value)| { - if value & privs != 0 { - priv_names.push(name); - } - priv_names - }) -} - -/// Admin always has all privileges. It can do everything except a few actions -/// which are limited to the 'root@pam` superuser -pub const ROLE_ADMIN: u64 = u64::MAX; - -/// NoAccess can be used to remove privileges from specific (sub-)paths -pub const ROLE_NO_ACCESS: u64 = 0; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Audit can view configuration and status information, but not modify it. -pub const ROLE_AUDIT: u64 = 0 - | PRIV_SYS_AUDIT - | PRIV_DATASTORE_AUDIT; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Datastore.Admin can do anything on the datastore. 
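The removed `constnamedbitmap!` block above maps each privilege name to one bit of a `u64`, and `privs_to_priv_names` walks that table to turn a mask back into names. A minimal standalone sketch of the same idea (hand-picked bits, not the real PRIVILEGES table):

```rust
/// Sketch: privileges as u64 bitflags, roles as unions of bits.
const PRIV_SYS_AUDIT: u64 = 1 << 0;
const PRIV_DATASTORE_AUDIT: u64 = 1 << 1;
const PRIV_DATASTORE_BACKUP: u64 = 1 << 2;

const PRIVILEGES: &[(&str, u64)] = &[
    ("Sys.Audit", PRIV_SYS_AUDIT),
    ("Datastore.Audit", PRIV_DATASTORE_AUDIT),
    ("Datastore.Backup", PRIV_DATASTORE_BACKUP),
];

/// Mirrors the removed `privs_to_priv_names`: collect the names of all
/// privilege bits set in `privs`.
fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
    PRIVILEGES
        .iter()
        .filter(|(_, value)| value & privs != 0)
        .map(|(name, _)| *name)
        .collect()
}

fn main() {
    // A role is the bitwise OR of its privileges (ROLE_AUDIT style) ...
    let role_audit = PRIV_SYS_AUDIT | PRIV_DATASTORE_AUDIT;
    // ... so an authorization check is a single AND.
    assert!(role_audit & PRIV_DATASTORE_AUDIT != 0);
    assert!(role_audit & PRIV_DATASTORE_BACKUP == 0);
    assert_eq!(
        privs_to_priv_names(role_audit),
        ["Sys.Audit", "Datastore.Audit"]
    );
}
```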
-pub const ROLE_DATASTORE_ADMIN: u64 = 0 - | PRIV_DATASTORE_AUDIT - | PRIV_DATASTORE_MODIFY - | PRIV_DATASTORE_READ - | PRIV_DATASTORE_VERIFY - | PRIV_DATASTORE_BACKUP - | PRIV_DATASTORE_PRUNE; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Datastore.Reader can read/verify datastore content and do restore -pub const ROLE_DATASTORE_READER: u64 = 0 - | PRIV_DATASTORE_AUDIT - | PRIV_DATASTORE_VERIFY - | PRIV_DATASTORE_READ; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Datastore.Backup can do backup and restore, but no prune. -pub const ROLE_DATASTORE_BACKUP: u64 = 0 - | PRIV_DATASTORE_BACKUP; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Datastore.PowerUser can do backup, restore, and prune. -pub const ROLE_DATASTORE_POWERUSER: u64 = 0 - | PRIV_DATASTORE_PRUNE - | PRIV_DATASTORE_BACKUP; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Datastore.Audit can audit the datastore. -pub const ROLE_DATASTORE_AUDIT: u64 = 0 - | PRIV_DATASTORE_AUDIT; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Remote.Audit can audit the remote -pub const ROLE_REMOTE_AUDIT: u64 = 0 - | PRIV_REMOTE_AUDIT; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Remote.Admin can do anything on the remote. -pub const ROLE_REMOTE_ADMIN: u64 = 0 - | PRIV_REMOTE_AUDIT - | PRIV_REMOTE_MODIFY - | PRIV_REMOTE_READ; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Remote.SyncOperator can do read and prune on the remote. -pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0 - | PRIV_REMOTE_AUDIT - | PRIV_REMOTE_READ; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Remote.SyncPushOperator can read and push snapshots to the remote. -pub const ROLE_REMOTE_SYNC_PUSH_OPERATOR: u64 = 0 - | PRIV_REMOTE_AUDIT - | PRIV_REMOTE_DATASTORE_BACKUP; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Remote.DatastorePowerUser can read and push snapshots to the remote, and prune owned snapshots -/// and groups but not create or remove namespaces. -pub const ROLE_REMOTE_DATASTORE_POWERUSER: u64 = 0 - | PRIV_REMOTE_AUDIT - | PRIV_REMOTE_DATASTORE_BACKUP - | PRIV_REMOTE_DATASTORE_PRUNE; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Remote.DatastoreAdmin can read and push snapshots to the remote, prune owned snapshots -/// and groups, as well as create or remove namespaces. 
-pub const ROLE_REMOTE_DATASTORE_ADMIN: u64 = 0 - | PRIV_REMOTE_AUDIT - | PRIV_REMOTE_DATASTORE_BACKUP - | PRIV_REMOTE_DATASTORE_MODIFY - | PRIV_REMOTE_DATASTORE_PRUNE; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Tape.Audit can audit the tape backup configuration and media content -pub const ROLE_TAPE_AUDIT: u64 = 0 - | PRIV_TAPE_AUDIT; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Tape.Admin can do anything on the tape backup -pub const ROLE_TAPE_ADMIN: u64 = 0 - | PRIV_TAPE_AUDIT - | PRIV_TAPE_MODIFY - | PRIV_TAPE_READ - | PRIV_TAPE_WRITE; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Tape.Operator can do tape backup and restore (but no configuration changes) -pub const ROLE_TAPE_OPERATOR: u64 = 0 - | PRIV_TAPE_AUDIT - | PRIV_TAPE_READ - | PRIV_TAPE_WRITE; - -#[rustfmt::skip] -#[allow(clippy::identity_op)] -/// Tape.Reader can do read and inspect tape content -pub const ROLE_TAPE_READER: u64 = 0 - | PRIV_TAPE_AUDIT - | PRIV_TAPE_READ; - -/// NoAccess can be used to remove privileges from specific (sub-)paths -pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess"; - -#[api( - type_text: "", -)] -#[repr(u64)] -#[derive(Serialize, Deserialize)] -/// Enum representing roles via their [PRIVILEGES] combination. -/// -/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a -/// single, unique `u64` value that is used in this enum definition. -pub enum Role { - /// Administrator - Admin = ROLE_ADMIN, - /// Auditor - Audit = ROLE_AUDIT, - /// Disable Access - NoAccess = ROLE_NO_ACCESS, - /// Datastore Administrator - DatastoreAdmin = ROLE_DATASTORE_ADMIN, - /// Datastore Reader (inspect datastore content and do restores) - DatastoreReader = ROLE_DATASTORE_READER, - /// Datastore Backup (backup and restore owned backups) - DatastoreBackup = ROLE_DATASTORE_BACKUP, - /// Datastore PowerUser (backup, restore and prune owned backup) - DatastorePowerUser = ROLE_DATASTORE_POWERUSER, - /// Datastore Auditor - DatastoreAudit = ROLE_DATASTORE_AUDIT, - /// Remote Auditor - RemoteAudit = ROLE_REMOTE_AUDIT, - /// Remote Administrator - RemoteAdmin = ROLE_REMOTE_ADMIN, - /// Synchronization Operator - RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR, - /// Synchronisation Operator (push direction) - RemoteSyncPushOperator = ROLE_REMOTE_SYNC_PUSH_OPERATOR, - /// Remote Datastore Prune - RemoteDatastorePowerUser = ROLE_REMOTE_DATASTORE_POWERUSER, - /// Remote Datastore Admin - RemoteDatastoreAdmin = ROLE_REMOTE_DATASTORE_ADMIN, - /// Tape Auditor - TapeAudit = ROLE_TAPE_AUDIT, - /// Tape Administrator - TapeAdmin = ROLE_TAPE_ADMIN, - /// Tape Operator - TapeOperator = ROLE_TAPE_OPERATOR, - /// Tape Reader - TapeReader = ROLE_TAPE_READER, -} - -impl FromStr for Role { - type Err = value::Error; - - fn from_str(s: &str) -> Result { - Self::deserialize(s.into_deserializer()) - } -} - -pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX); - -pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.") - .format(&ACL_PATH_FORMAT) - .min_length(1) - .max_length(128) - .schema(); - -pub const ACL_PROPAGATE_SCHEMA: Schema = - BooleanSchema::new("Allow to propagate (inherit) permissions.") - .default(true) - .schema(); - -pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.") - .format(&ApiStringFormat::Enum(&[ - EnumEntry::new("user", "User"), - EnumEntry::new("group", "Group"), - ])) - .schema(); - -#[api( - properties: { - propagate: { - schema: ACL_PROPAGATE_SCHEMA, - 
}, - path: { - schema: ACL_PATH_SCHEMA, - }, - ugid_type: { - schema: ACL_UGID_TYPE_SCHEMA, - }, - ugid: { - type: String, - description: "User or Group ID.", - }, - roleid: { - type: Role, - } - } -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -/// ACL list entry. -pub struct AclListItem { - pub path: String, - pub ugid: String, - pub ugid_type: String, - pub propagate: bool, - pub roleid: String, -} diff --git a/pbs-api-types/src/ad.rs b/pbs-api-types/src/ad.rs deleted file mode 100644 index 910571a0..00000000 --- a/pbs-api-types/src/ad.rs +++ /dev/null @@ -1,98 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use proxmox_schema::{api, Updater}; - -use super::{ - LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, - SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA, -}; - -#[api( - properties: { - "realm": { - schema: REALM_ID_SCHEMA, - }, - "comment": { - optional: true, - schema: SINGLE_LINE_COMMENT_SCHEMA, - }, - "verify": { - optional: true, - default: false, - }, - "sync-defaults-options": { - schema: SYNC_DEFAULTS_STRING_SCHEMA, - optional: true, - }, - "sync-attributes": { - schema: SYNC_ATTRIBUTES_SCHEMA, - optional: true, - }, - "user-classes" : { - optional: true, - schema: USER_CLASSES_SCHEMA, - }, - "base-dn" : { - schema: LDAP_DOMAIN_SCHEMA, - optional: true, - }, - "bind-dn" : { - schema: LDAP_DOMAIN_SCHEMA, - optional: true, - } - }, -)] -#[derive(Serialize, Deserialize, Updater, Clone)] -#[serde(rename_all = "kebab-case")] -/// AD realm configuration properties. -pub struct AdRealmConfig { - #[updater(skip)] - pub realm: String, - /// AD server address - pub server1: String, - /// Fallback AD server address - #[serde(skip_serializing_if = "Option::is_none")] - pub server2: Option, - /// AD server Port - #[serde(skip_serializing_if = "Option::is_none")] - pub port: Option, - /// Base domain name. Users are searched under this domain using a `subtree search`. - /// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be - /// overridden if the need arises. - #[serde(skip_serializing_if = "Option::is_none")] - pub base_dn: Option, - /// Comment - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, - /// Connection security - #[serde(skip_serializing_if = "Option::is_none")] - pub mode: Option, - /// Verify server certificate - #[serde(skip_serializing_if = "Option::is_none")] - pub verify: Option, - /// CA certificate to use for the server. The path can point to - /// either a file, or a directory. If it points to a file, - /// the PEM-formatted X.509 certificate stored at the path - /// will be added as a trusted certificate. - /// If the path points to a directory, - /// the directory replaces the system's default certificate - /// store at `/etc/ssl/certs` - Every file in the directory - /// will be loaded as a trusted certificate. 
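The removed `Role::from_str` above delegates parsing to the serde `Deserialize` impl via `IntoDeserializer`, so string parsing and API deserialization can never disagree. A compilable sketch of that pattern with a hypothetical stand-in enum (requires serde with the derive feature):

```rust
use serde::de::{value, IntoDeserializer};
use serde::Deserialize;

// Hypothetical stand-in enum; `Role` above uses the same pattern.
#[derive(Debug, PartialEq, Deserialize)]
#[serde(rename_all = "kebab-case")]
enum Example {
    NoAccess,
    DatastoreBackup,
}

impl std::str::FromStr for Example {
    type Err = value::Error;

    // Reuse the serde Deserialize impl (and thus its rename rules)
    // instead of matching on strings a second time.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::deserialize(s.into_deserializer())
    }
}

fn main() {
    assert_eq!("no-access".parse::<Example>().unwrap(), Example::NoAccess);
    assert!("bogus".parse::<Example>().is_err());
}
```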
- #[serde(skip_serializing_if = "Option::is_none")] - pub capath: Option, - /// Bind domain to use for looking up users - #[serde(skip_serializing_if = "Option::is_none")] - pub bind_dn: Option, - /// Custom LDAP search filter for user sync - #[serde(skip_serializing_if = "Option::is_none")] - pub filter: Option, - /// Default options for AD sync - #[serde(skip_serializing_if = "Option::is_none")] - pub sync_defaults_options: Option, - /// List of LDAP attributes to sync from AD to user config - #[serde(skip_serializing_if = "Option::is_none")] - pub sync_attributes: Option, - /// User ``objectClass`` classes to sync - #[serde(skip_serializing_if = "Option::is_none")] - pub user_classes: Option, -} diff --git a/pbs-api-types/src/crypto.rs b/pbs-api-types/src/crypto.rs deleted file mode 100644 index cdc1ba64..00000000 --- a/pbs-api-types/src/crypto.rs +++ /dev/null @@ -1,95 +0,0 @@ -use std::fmt::{self, Display}; - -use anyhow::Error; -use serde::{Deserialize, Serialize}; - -use proxmox_schema::api; - -#[api(default: "encrypt")] -#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "kebab-case")] -/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither. -pub enum CryptMode { - /// Don't encrypt. - None, - /// Encrypt. - Encrypt, - /// Only sign. - SignOnly, -} - -#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)] -#[serde(transparent)] -/// 32-byte fingerprint, usually calculated with SHA256. -pub struct Fingerprint { - #[serde(with = "bytes_as_fingerprint")] - bytes: [u8; 32], -} - -impl Fingerprint { - pub fn new(bytes: [u8; 32]) -> Self { - Self { bytes } - } - pub fn bytes(&self) -> &[u8; 32] { - &self.bytes - } - pub fn signature(&self) -> String { - as_fingerprint(&self.bytes) - } -} - -/// Display as short key ID -impl Display for Fingerprint { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", as_fingerprint(&self.bytes[0..8])) - } -} - -impl std::str::FromStr for Fingerprint { - type Err = Error; - - fn from_str(s: &str) -> Result { - let mut tmp = s.to_string(); - tmp.retain(|c| c != ':'); - let mut bytes = [0u8; 32]; - hex::decode_to_slice(&tmp, &mut bytes)?; - Ok(Fingerprint::new(bytes)) - } -} - -fn as_fingerprint(bytes: &[u8]) -> String { - hex::encode(bytes) - .as_bytes() - .chunks(2) - .map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string - .collect::>() - .join(":") -} - -pub mod bytes_as_fingerprint { - use std::mem::MaybeUninit; - - use serde::{Deserialize, Deserializer, Serializer}; - - pub fn serialize(bytes: &[u8; 32], serializer: S) -> Result - where - S: Serializer, - { - let s = super::as_fingerprint(bytes); - serializer.serialize_str(&s) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error> - where - D: Deserializer<'de>, - { - // TODO: more efficiently implement with a Visitor implementing visit_str using split() and - // hex::decode by-byte - let mut s = String::deserialize(deserializer)?; - s.retain(|c| c != ':'); - let mut out = MaybeUninit::<[u8; 32]>::uninit(); - hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] 
}) - .map_err(serde::de::Error::custom)?; - Ok(unsafe { out.assume_init() }) - } -} diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs deleted file mode 100644 index ddd8d3c6..00000000 --- a/pbs-api-types/src/datastore.rs +++ /dev/null @@ -1,1971 +0,0 @@ -use std::convert::{AsRef, TryFrom}; -use std::fmt; -use std::path::{Path, PathBuf}; -use std::str::FromStr; -use std::sync::LazyLock; - -use anyhow::{bail, format_err, Error}; -use const_format::concatcp; -use serde::{Deserialize, Serialize}; - -use proxmox_schema::{ - api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, - Schema, StringSchema, Updater, UpdaterType, -}; - -use crate::{ - Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, MaintenanceType, Userid, - BACKUP_ID_RE, BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA, - GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT, - PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, - SNAPSHOT_PATH_REGEX_STR, UPID, -}; - -const_regex! { - pub BACKUP_NAMESPACE_REGEX = concatcp!(r"^", BACKUP_NS_RE, r"$"); - - pub BACKUP_TYPE_REGEX = concatcp!(r"^(", BACKUP_TYPE_RE, r")$"); - - pub BACKUP_ID_REGEX = concatcp!(r"^", BACKUP_ID_RE, r"$"); - - pub BACKUP_DATE_REGEX = concatcp!(r"^", BACKUP_TIME_RE ,r"$"); - - pub GROUP_PATH_REGEX = concatcp!( - r"^(", BACKUP_TYPE_RE, ")/", - r"(", BACKUP_ID_RE, r")$", - ); - - pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$"; - - pub SNAPSHOT_PATH_REGEX = concatcp!(r"^", SNAPSHOT_PATH_REGEX_STR, r"$"); - pub GROUP_OR_SNAPSHOT_PATH_REGEX = concatcp!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR, r"$"); - - pub DATASTORE_MAP_REGEX = concatcp!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR, r"=)?", PROXMOX_SAFE_ID_REGEX_STR, r"$"); -} - -pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); - -pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or a relative on-device path for removable datastores.") - .min_length(1) - .max_length(4096) - .schema(); - -pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.") - .format(&PROXMOX_SAFE_ID_FORMAT) - .schema(); - -pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX); -pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX); -pub const BACKUP_NAMESPACE_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&BACKUP_NAMESPACE_REGEX); - -pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.") - .format(&BACKUP_ID_FORMAT) - .schema(); - -pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.") - .format(&ApiStringFormat::Enum(&[ - EnumEntry::new("vm", "Virtual Machine Backup"), - EnumEntry::new("ct", "Container Backup"), - EnumEntry::new("host", "Host Backup"), - ])) - .schema(); - -pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)") - .minimum(1) - .schema(); - -pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group") - .format(&BACKUP_GROUP_FORMAT) - .schema(); - -/// The maximal, inclusive depth for namespaces from the root ns downwards -/// -/// The datastore root name space is at depth zero (0), so we have in total eight (8) levels -pub const MAX_NAMESPACE_DEPTH: usize = 7; -pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256 -pub const BACKUP_NAMESPACE_SCHEMA: Schema = 
StringSchema::new("Namespace.") - .format(&BACKUP_NAMESPACE_FORMAT) - .max_length(MAX_BACKUP_NAMESPACE_LENGTH) // 256 - .schema(); - -pub const NS_MAX_DEPTH_SCHEMA: Schema = - IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion)") - .minimum(0) - .maximum(MAX_NAMESPACE_DEPTH as isize) - .default(MAX_NAMESPACE_DEPTH as isize) - .schema(); - -pub const NS_MAX_DEPTH_REDUCED_SCHEMA: Schema = -IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion, empty == automatic full recursion, namespace depths reduce maximum allowed value)") - .minimum(0) - .maximum(MAX_NAMESPACE_DEPTH as isize) - .schema(); - -pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.") - .format(&PROXMOX_SAFE_ID_FORMAT) - .min_length(3) - .max_length(32) - .schema(); - -pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).") - .format(&CHUNK_DIGEST_FORMAT) - .schema(); - -pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX); - -pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.") - .format(&DATASTORE_MAP_FORMAT) - .min_length(3) - .max_length(65) - .type_text("(=)?") - .schema(); - -pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = - ArraySchema::new("Datastore mapping list.", &DATASTORE_MAP_SCHEMA).schema(); - -pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new( - "A list of Datastore mappings (or single datastore), comma separated. \ - For example 'a=b,e' maps the source datastore 'a' to target 'b and \ - all other sources to the default 'e'. If no default is given, only the \ - specified sources are mapped.", -) -.format(&ApiStringFormat::PropertyString( - &DATASTORE_MAP_ARRAY_SCHEMA, -)) -.schema(); - -pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = - IntegerSchema::new("Number of hourly backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = - IntegerSchema::new("Number of monthly backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = - IntegerSchema::new("Number of weekly backups to keep.") - .minimum(1) - .schema(); - -pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = - IntegerSchema::new("Number of yearly backups to keep.") - .minimum(1) - .schema(); - -/// Base directory where datastores are mounted -pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; - -#[api] -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// The order to sort chunks by -pub enum ChunkOrder { - /// Iterate chunks in the index order - None, - /// Iterate chunks in inode order - #[default] - Inode, -} - -#[api] -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// Current mounting status of a datastore, useful for removable datastores. -pub enum DataStoreMountStatus { - /// Removable datastore is currently mounted correctly. - Mounted, - /// Removable datastore is currebtly not mounted. - NotMounted, - /// Datastore is not removable, so there is no mount status. 
- #[default] - NonRemovable, -} - -#[api] -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// The level of syncing that is done when writing into a datastore. -pub enum DatastoreFSyncLevel { - /// No special fsync or syncfs calls are triggered. The system default dirty write back - /// mechanism ensures that data gets is flushed eventually via the `dirty_writeback_centisecs` - /// and `dirty_expire_centisecs` kernel sysctls, defaulting to ~ 30s. - /// - /// This mode provides generally the best performance, as all write back can happen async, - /// which reduces IO pressure. - /// But it may cause losing data on powerloss or system crash without any uninterruptible power - /// supply. - None, - /// Triggers a fsync after writing any chunk on the datastore. While this can slow down - /// backups significantly, depending on the underlying file system and storage used, it - /// will ensure fine-grained consistency. Depending on the exact setup, there might be no - /// benefits over the file system level sync, so if the setup allows it, you should prefer - /// that one. Despite the possible negative impact in performance, it's the most consistent - /// mode. - File, - /// Trigger a filesystem wide sync after all backup data got written but before finishing the - /// task. This allows that every finished backup is fully written back to storage - /// while reducing the impact on many file systems in contrast to the file level sync. - /// Depending on the setup, it might have a negative impact on unrelated write operations - /// of the underlying filesystem, but it is generally a good compromise between performance - /// and consistency. - #[default] - Filesystem, -} - -#[api( - properties: { - "chunk-order": { - type: ChunkOrder, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize, Default)] -#[serde(rename_all = "kebab-case")] -/// Datastore tuning options -pub struct DatastoreTuning { - /// Iterate chunks in this order - #[serde(skip_serializing_if = "Option::is_none")] - pub chunk_order: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub sync_level: Option, -} - -pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore tuning options") - .format(&ApiStringFormat::PropertyString( - &DatastoreTuning::API_SCHEMA, - )) - .schema(); - -#[api( - properties: { - name: { - schema: DATASTORE_SCHEMA, - }, - path: { - schema: DATASTORE_DIR_NAME_SCHEMA, - }, - "notify-user": { - optional: true, - type: Userid, - }, - "notify": { - optional: true, - schema: DATASTORE_NOTIFY_STRING_SCHEMA, - }, - comment: { - optional: true, - schema: SINGLE_LINE_COMMENT_SCHEMA, - }, - "gc-schedule": { - optional: true, - schema: GC_SCHEDULE_SCHEMA, - }, - "prune-schedule": { - optional: true, - schema: PRUNE_SCHEDULE_SCHEMA, - }, - keep: { - type: crate::KeepOptions, - }, - "verify-new": { - description: "If enabled, all new backups will be verified right after completion.", - optional: true, - type: bool, - }, - tuning: { - optional: true, - schema: DATASTORE_TUNING_STRING_SCHEMA, - }, - "maintenance-mode": { - optional: true, - format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), - type: String, - }, - "backing-device": { - description: "The UUID of the filesystem partition for removable datastores.", - optional: true, - format: &proxmox_schema::api_types::UUID_FORMAT, - type: String, - } - } -)] -#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] -#[serde(rename_all = 
"kebab-case")] -/// Datastore configuration properties. -pub struct DataStoreConfig { - #[updater(skip)] - pub name: String, - - #[updater(skip)] - pub path: String, - - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, - - #[serde(skip_serializing_if = "Option::is_none")] - pub gc_schedule: Option, - - #[serde(skip_serializing_if = "Option::is_none")] - pub prune_schedule: Option, - - #[serde(flatten)] - pub keep: crate::KeepOptions, - - /// If enabled, all backups will be verified right after completion. - #[serde(skip_serializing_if = "Option::is_none")] - pub verify_new: Option, - - /// Send job email notification to this user - #[serde(skip_serializing_if = "Option::is_none")] - pub notify_user: Option, - - /// Send notification only for job errors - #[serde(skip_serializing_if = "Option::is_none")] - pub notify: Option, - - /// Opt in to the new notification system - #[serde(skip_serializing_if = "Option::is_none")] - pub notification_mode: Option, - - /// Datastore tuning options - #[serde(skip_serializing_if = "Option::is_none")] - pub tuning: Option, - - /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " - #[serde(skip_serializing_if = "Option::is_none")] - pub maintenance_mode: Option, - - /// The UUID of the device(for removable datastores) - #[updater(skip)] - #[serde(skip_serializing_if = "Option::is_none")] - pub backing_device: Option, -} - -#[api] -#[derive(Serialize, Deserialize, Updater, Clone, PartialEq, Default)] -#[serde(rename_all = "kebab-case")] -/// Configure how notifications for this datastore should be sent. -/// `legacy-sendmail` sends email notifications to the user configured -/// in `notify-user` via the system's `sendmail` executable. -/// `notification-system` emits matchable notification events to the -/// notification system. -pub enum NotificationMode { - /// Send notifications via the system's sendmail command to the user - /// configured in `notify-user` - #[default] - LegacySendmail, - /// Emit notification events to the notification system - NotificationSystem, -} - -impl DataStoreConfig { - pub fn new(name: String, path: String) -> Self { - Self { - name, - path, - comment: None, - gc_schedule: None, - prune_schedule: None, - keep: Default::default(), - verify_new: None, - notify_user: None, - notify: None, - notification_mode: None, - tuning: None, - maintenance_mode: None, - backing_device: None, - } - } - - /// Returns the absolute path to the datastore content. 
- pub fn absolute_path(&self) -> String { - if self.backing_device.is_some() { - format!("{DATASTORE_MOUNT_DIR}/{}", self.name) - } else { - self.path.clone() - } - } - - pub fn get_maintenance_mode(&self) -> Option { - self.maintenance_mode.as_ref().and_then(|str| { - MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new( - str, - &MaintenanceMode::API_SCHEMA, - )) - .ok() - }) - } - - pub fn set_maintenance_mode(&mut self, new_mode: Option) -> Result<(), Error> { - let current_type = self.get_maintenance_mode().map(|mode| mode.ty); - let new_type = new_mode.as_ref().map(|mode| mode.ty); - - match current_type { - Some(MaintenanceType::ReadOnly) => { /* always OK */ } - Some(MaintenanceType::Offline) => { /* always OK */ } - Some(MaintenanceType::Unmount) => { - /* used to reset it after failed unmount, or alternative for aborting unmount task */ - } - Some(MaintenanceType::Delete) => { - match new_type { - Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ } - _ => { - bail!("datastore is being deleted") - } - } - } - None => { /* always OK */ } - } - - let new_mode = match new_mode { - Some(new_mode) => Some( - proxmox_schema::property_string::PropertyString::new(new_mode) - .to_property_string()?, - ), - None => None, - }; - - self.maintenance_mode = new_mode; - - Ok(()) - } - - pub fn ensure_not_nested(&self, stores: &[DataStoreConfig]) -> Result<(), Error> { - let our_absolute_path = PathBuf::from(self.absolute_path()); - let removable = self.backing_device.is_some(); - for other_store in stores { - if self == other_store { - continue; - }; - - // Relative paths must not be nested on the backing device of removable datastores - if removable && other_store.backing_device == self.backing_device { - let our_relative_path = Path::new(&self.path); - let other_relative_path = Path::new(&other_store.path); - if our_relative_path.starts_with(other_relative_path) - || other_relative_path.starts_with(our_relative_path) - { - bail!( - "paths on backing device must not be nested - {path:?} already used by '{store}'!", - path = other_relative_path, - store = other_store.name, - ); - } - } - - // No two datastores should have a nested absolute path - let other_absolute_path = PathBuf::from(other_store.absolute_path()); - if other_absolute_path.starts_with(&our_absolute_path) - || our_absolute_path.starts_with(&other_absolute_path) - { - bail!( - "nested datastores not allowed: '{}' already in {:?}", - other_store.name, - other_absolute_path, - ); - } - } - - Ok(()) - } -} - -#[api( - properties: { - store: { - schema: DATASTORE_SCHEMA, - }, - comment: { - optional: true, - schema: SINGLE_LINE_COMMENT_SCHEMA, - }, - "mount-status": { - type: DataStoreMountStatus, - }, - maintenance: { - optional: true, - format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), - type: String, - } - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Basic information about a datastore. 
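The `set_maintenance_mode` logic above boils down to one rule: any mode change is allowed unless the store is already being deleted, in which case only re-setting `Delete` is accepted. Sketched as a standalone transition check:

```rust
/// Sketch of the transition rules enforced by `set_maintenance_mode` above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum MaintenanceType {
    ReadOnly,
    Offline,
    Unmount,
    Delete,
}

fn transition_allowed(
    current: Option<MaintenanceType>,
    new: Option<MaintenanceType>,
) -> Result<(), &'static str> {
    match (current, new) {
        // Re-setting Delete while the removal task runs is permitted.
        (Some(MaintenanceType::Delete), Some(MaintenanceType::Delete)) => Ok(()),
        (Some(MaintenanceType::Delete), _) => Err("datastore is being deleted"),
        // ReadOnly, Offline, Unmount and "no mode" can always be changed.
        _ => Ok(()),
    }
}

fn main() {
    assert!(transition_allowed(Some(MaintenanceType::ReadOnly), None).is_ok());
    assert!(transition_allowed(Some(MaintenanceType::Delete), None).is_err());
}
```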
-pub struct DataStoreListItem {
-    pub store: String,
-    pub comment: Option<String>,
-    #[serde(default)]
-    pub mount_status: DataStoreMountStatus,
-    /// If the datastore is in maintenance mode, information about it
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub maintenance: Option<String>,
-}
-
-#[api(
-    properties: {
-        "filename": {
-            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
-        },
-        "crypt-mode": {
-            type: CryptMode,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Basic information about archive files inside a backup snapshot.
-pub struct BackupContent {
-    pub filename: String,
-    /// Info if file is encrypted, signed, or neither.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub crypt_mode: Option<CryptMode>,
-    /// Archive size (from backup manifest).
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub size: Option<u64>,
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// Result of a verify operation.
-pub enum VerifyState {
-    /// Verification was successful
-    Ok,
-    /// Verification reported one or more errors
-    Failed,
-}
-
-#[api(
-    properties: {
-        upid: {
-            type: UPID,
-        },
-        state: {
-            type: VerifyState,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Clone, PartialEq)]
-/// Task properties.
-pub struct SnapshotVerifyState {
-    /// UPID of the verify task
-    pub upid: UPID,
-    /// State of the verification. Enum.
-    pub state: VerifyState,
-}
-
-/// A namespace provides a logical separation between backup groups from different domains
-/// (cluster, sites, ...) where uniqueness cannot be guaranteed anymore. It allows users to share a
-/// datastore (i.e., one deduplication domain (chunk store)) with multiple (trusted) sites and
-/// allows forming a hierarchy, for easier management and avoiding clashes between backup_ids.
-///
-/// NOTE: Namespaces are a logical boundary only, they do not provide a full secure separation as
-/// the chunk store is still shared. So, users who do not trust each other must not share a
-/// datastore.
-///
-/// Implementation note: The path a namespace resolves to is always prefixed with `/ns` to avoid
-/// clashes with backup group IDs and future backup_types and to have a clean separation between
-/// the namespace directories and the ones from a backup snapshot.
-#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, UpdaterType)]
-pub struct BackupNamespace {
-    /// The namespace subdirectories without the `ns/` intermediate directories.
-    inner: Vec<String>,
-
-    /// Cache the total length for efficiency.
-    len: usize,
-}
-
-impl BackupNamespace {
-    /// Returns a root namespace reference.
-    pub const fn root() -> Self {
-        Self {
-            inner: Vec::new(),
-            len: 0,
-        }
-    }
-
-    /// True if this represents the root namespace.
-    pub fn is_root(&self) -> bool {
-        self.inner.is_empty()
-    }
-
-    /// Try to parse a string into a namespace.
-    pub fn new(name: &str) -> Result<Self, Error> {
-        let mut this = Self::root();
-
-        if name.is_empty() {
-            return Ok(this);
-        }
-
-        for name in name.split('/') {
-            this.push(name.to_string())?;
-        }
-        Ok(this)
-    }
-
-    /// Try to parse a file path string (where each sub-namespace is separated by an `ns`
-    /// subdirectory) into a valid namespace.
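Per the implementation note above, a logical namespace like `a/b/c` lives on disk under `ns/a/ns/b/ns/c`, which keeps namespace directories disjoint from group directories such as `vm/100`. A simplified round-trip sketch of that layout (the removed `from_path`/`display_as_path` are the real versions):

```rust
/// Sketch: logical components <-> `ns/`-prefixed on-disk path.
fn to_path(components: &[&str]) -> String {
    components
        .iter()
        .map(|c| format!("ns/{c}"))
        .collect::<Vec<_>>()
        .join("/")
}

fn from_path(path: &str) -> Result<Vec<String>, String> {
    let mut out = Vec::new();
    if path.is_empty() {
        return Ok(out); // root namespace
    }
    let mut parts = path.split('/');
    while let Some(prefix) = parts.next() {
        if prefix != "ns" {
            return Err(format!("invalid component in namespace path at '{prefix}'"));
        }
        match parts.next() {
            Some(name) if !name.is_empty() => out.push(name.to_string()),
            _ => return Err("missing namespace component".to_string()),
        }
    }
    Ok(out)
}

fn main() {
    assert_eq!(to_path(&["a", "b", "c"]), "ns/a/ns/b/ns/c");
    assert_eq!(from_path("ns/a/ns/b").unwrap(), ["a", "b"]);
    assert!(from_path("a/b").is_err()); // group-style paths are rejected
}
```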
- pub fn from_path(mut path: &str) -> Result { - let mut this = Self::root(); - loop { - match path.strip_prefix("ns/") { - Some(next) => match next.find('/') { - Some(pos) => { - this.push(next[..pos].to_string())?; - path = &next[(pos + 1)..]; - } - None => { - this.push(next.to_string())?; - break; - } - }, - None if !path.is_empty() => { - bail!("invalid component in namespace path at {:?}", path); - } - None => break, - } - } - Ok(this) - } - - /// Create a new Namespace attached to parent - /// - /// `name` must be a single level namespace ID, that is, no '/' is allowed. - /// This rule also avoids confusion about the name being a NS or NS-path - pub fn from_parent_ns(parent: &Self, name: String) -> Result { - let mut child = parent.to_owned(); - child.push(name)?; - Ok(child) - } - - /// Pop one level off the namespace hierarchy - pub fn pop(&mut self) -> Option { - let dropped = self.inner.pop(); - if let Some(ref dropped) = dropped { - self.len = self.len.saturating_sub(dropped.len() + 1); - } - dropped - } - - /// Get the namespace parent as owned BackupNamespace - pub fn parent(&self) -> Self { - if self.is_root() { - return Self::root(); - } - - let mut parent = self.clone(); - parent.pop(); - - parent - } - - /// Create a new namespace directly from a vec. - /// - /// # Safety - /// - /// Invalid contents may lead to inaccessible backups. - pub unsafe fn from_vec_unchecked(components: Vec) -> Self { - let mut this = Self { - inner: components, - len: 0, - }; - this.recalculate_len(); - this - } - - /// Recalculate the length. - fn recalculate_len(&mut self) { - self.len = self.inner.len().max(1) - 1; // a slash between each component - for part in &self.inner { - self.len += part.len(); - } - } - - /// The hierarchical depth of the namespace, 0 means top-level. - pub fn depth(&self) -> usize { - self.inner.len() - } - - /// The logical name and ID of the namespace. - pub fn name(&self) -> String { - self.to_string() - } - - /// The actual relative backing path of the namespace on the datastore. - pub fn path(&self) -> PathBuf { - self.display_as_path().to_string().into() - } - - /// Get the current namespace length. - /// - /// This includes separating slashes, but does not include the `ns/` intermediate directories. - /// This is not the *path* length, but rather the length that would be produced via - /// `.to_string()`. - #[inline] - pub fn name_len(&self) -> usize { - self.len - } - - /// Get the current namespace path length. - /// - /// This includes the `ns/` subdirectory strings. - pub fn path_len(&self) -> usize { - self.name_len() + 3 * self.inner.len() - } - - /// Enter a sub-namespace. Fails if nesting would become too deep or the name too long. - pub fn push(&mut self, subdir: String) -> Result<(), Error> { - if subdir.contains('/') { - bail!("namespace component contained a slash"); - } - - self.push_do(subdir) - } - - /// Assumes `subdir` already does not contain any slashes. - /// Performs remaining checks and updates the length. 
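The length bookkeeping above is easy to sanity-check: `name_len` is the `to_string()` length (components plus separating slashes), while `path_len` adds 3 bytes of `ns/` prefix per component:

```rust
/// Quick check of the name_len/path_len relationship for "a/bc/d".
fn main() {
    let components = ["a", "bc", "d"];
    // component bytes (1 + 2 + 1) plus two separating slashes
    let name_len: usize =
        components.iter().map(|c| c.len()).sum::<usize>() + components.len() - 1;
    // each component additionally costs 3 bytes ("ns/") on disk
    let path_len = name_len + 3 * components.len();
    assert_eq!(name_len, 6); // "a/bc/d"
    assert_eq!(path_len, 15); // "ns/a/ns/bc/ns/d"
}
```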
- fn push_do(&mut self, subdir: String) -> Result<(), Error> { - let depth = self.depth(); - // check for greater equal to account for the to be added subdir - if depth >= MAX_NAMESPACE_DEPTH { - bail!("namespace too deep, {depth} >= max {MAX_NAMESPACE_DEPTH}"); - } - - if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH { - bail!("namespace length exceeded"); - } - - if !crate::PROXMOX_SAFE_ID_REGEX.is_match(&subdir) { - bail!("not a valid namespace component: {subdir}"); - } - - if !self.inner.is_empty() { - self.len += 1; // separating slash - } - self.len += subdir.len(); - self.inner.push(subdir); - Ok(()) - } - - /// Return an adapter which [`fmt::Display`]s as a path with `"ns/"` prefixes in front of every - /// component. - pub fn display_as_path(&self) -> BackupNamespacePath { - BackupNamespacePath(self) - } - - /// Iterate over the subdirectories. - pub fn components(&self) -> impl Iterator + '_ { - self.inner.iter().map(String::as_str) - } - - /// Map NS by replacing `source_prefix` with `target_prefix` - pub fn map_prefix( - &self, - source_prefix: &BackupNamespace, - target_prefix: &BackupNamespace, - ) -> Result { - let suffix = self - .inner - .strip_prefix(&source_prefix.inner[..]) - .ok_or_else(|| { - format_err!( - "Failed to map namespace - {source_prefix} is not a valid prefix of {self}", - ) - })?; - - let mut new = target_prefix.clone(); - for item in suffix { - new.push(item.clone())?; - } - Ok(new) - } - - /// Check whether adding `depth` levels of sub-namespaces exceeds the max depth limit - pub fn check_max_depth(&self, depth: usize) -> Result<(), Error> { - let ns_depth = self.depth(); - if ns_depth + depth > MAX_NAMESPACE_DEPTH { - bail!( - "namespace '{self}'s depth and recursion depth exceed limit: {ns_depth} + {depth} > {MAX_NAMESPACE_DEPTH}", - ); - } - Ok(()) - } - - fn acl_path_extend<'a>(&'a self, mut path: Vec<&'a str>) -> Vec<&'a str> { - if self.is_root() { - path - } else { - path.extend(self.inner.iter().map(|comp| comp.as_str())); - path - } - } - - pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { - self.acl_path_extend(vec!["datastore", store]) - } - - pub fn remote_acl_path<'a>(&'a self, remote: &'a str, store: &'a str) -> Vec<&'a str> { - self.acl_path_extend(vec!["remote", remote, store]) - } - - /// Check whether this namespace contains another namespace. - /// - /// If so, the depth is returned. 
- /// - /// Example: - /// ``` - /// # use pbs_api_types::BackupNamespace; - /// let main: BackupNamespace = "a/b".parse().unwrap(); - /// let sub: BackupNamespace = "a/b/c/d".parse().unwrap(); - /// let other: BackupNamespace = "x/y".parse().unwrap(); - /// assert_eq!(main.contains(&main), Some(0)); - /// assert_eq!(main.contains(&sub), Some(2)); - /// assert_eq!(sub.contains(&main), None); - /// assert_eq!(main.contains(&other), None); - /// ``` - pub fn contains(&self, other: &BackupNamespace) -> Option { - other - .inner - .strip_prefix(&self.inner[..]) - .map(|suffix| suffix.len()) - } -} - -impl fmt::Display for BackupNamespace { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use std::fmt::Write; - - let mut parts = self.inner.iter(); - if let Some(first) = parts.next() { - f.write_str(first)?; - } - for part in parts { - f.write_char('/')?; - f.write_str(part)?; - } - Ok(()) - } -} - -serde_plain::derive_deserialize_from_fromstr!(BackupNamespace, "valid backup namespace"); - -impl std::str::FromStr for BackupNamespace { - type Err = Error; - - fn from_str(name: &str) -> Result { - Self::new(name) - } -} - -serde_plain::derive_serialize_from_display!(BackupNamespace); - -impl ApiType for BackupNamespace { - const API_SCHEMA: Schema = BACKUP_NAMESPACE_SCHEMA; -} - -/// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`]. -/// -/// This implements [`fmt::Display`] such that it includes the `ns/` subdirectory prefix in front of -/// every component. -pub struct BackupNamespacePath<'a>(&'a BackupNamespace); - -impl fmt::Display for BackupNamespacePath<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut sep = "ns/"; - for part in &self.0.inner { - f.write_str(sep)?; - sep = "/ns/"; - f.write_str(part)?; - } - Ok(()) - } -} - -#[api] -/// Backup types. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "lowercase")] -pub enum BackupType { - /// Virtual machines. - Vm, - - /// Containers. - Ct, - - /// "Host" backups. - Host, - // NOTE: if you add new types, don't forget to adapt the iter below! -} - -impl BackupType { - pub const fn as_str(&self) -> &'static str { - match self { - BackupType::Vm => "vm", - BackupType::Ct => "ct", - BackupType::Host => "host", - } - } - - /// We used to have alphabetical ordering here when this was a string. - const fn order(self) -> u8 { - match self { - BackupType::Ct => 0, - BackupType::Host => 1, - BackupType::Vm => 2, - } - } - - #[inline] - pub fn iter() -> impl Iterator + Send + Sync + Unpin + 'static { - [BackupType::Vm, BackupType::Ct, BackupType::Host] - .iter() - .copied() - } -} - -impl fmt::Display for BackupType { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.as_str(), f) - } -} - -impl std::str::FromStr for BackupType { - type Err = Error; - - /// Parse a backup type. 
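The explicit `order()` above exists because the variants are declared `Vm, Ct, Host`, but the sort order must stay alphabetical (`ct < host < vm`), as it was when the type was a plain string. A reduced sketch:

```rust
/// Sketch: declaration order differs from the preserved sort order.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum BackupType {
    Vm,
    Ct,
    Host,
}

impl BackupType {
    const fn order(self) -> u8 {
        match self {
            BackupType::Ct => 0,
            BackupType::Host => 1,
            BackupType::Vm => 2,
        }
    }
}

fn main() {
    let mut types = [BackupType::Vm, BackupType::Host, BackupType::Ct];
    types.sort_by_key(|t| t.order());
    // alphabetical, matching the old string-based ordering
    assert_eq!(types, [BackupType::Ct, BackupType::Host, BackupType::Vm]);
}
```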
    fn from_str(ty: &str) -> Result<Self, Self::Err> {
-        Ok(match ty {
-            "ct" => BackupType::Ct,
-            "host" => BackupType::Host,
-            "vm" => BackupType::Vm,
-            _ => bail!("invalid backup type {ty:?}"),
-        })
-    }
-}
-
-impl std::cmp::Ord for BackupType {
-    #[inline]
-    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        self.order().cmp(&other.order())
-    }
-}
-
-impl std::cmp::PartialOrd for BackupType {
-    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-#[api(
-    properties: {
-        "backup-type": { type: BackupType },
-        "backup-id": { schema: BACKUP_ID_SCHEMA },
-    },
-)]
-#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// A backup group (without a data store).
-pub struct BackupGroup {
-    /// Backup type.
-    #[serde(rename = "backup-type")]
-    pub ty: BackupType,
-
-    /// Backup id.
-    #[serde(rename = "backup-id")]
-    pub id: String,
-}
-
-impl BackupGroup {
-    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
-        Self { ty, id: id.into() }
-    }
-
-    pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
-        use crate::FilterType;
-        match &filter.filter_type {
-            FilterType::Group(backup_group) => {
-                match backup_group.parse::<BackupGroup>() {
-                    Ok(group) => *self == group,
-                    Err(_) => false, // shouldn't happen if value is schema-checked
-                }
-            }
-            FilterType::BackupType(ty) => self.ty == *ty,
-            FilterType::Regex(regex) => regex.is_match(&self.to_string()),
-        }
-    }
-
-    pub fn apply_filters(&self, filters: &[GroupFilter]) -> bool {
-        // since there will only be a few filters in the list, an extra iteration to get the
-        // number of include filters should not be an issue
-        let is_included = if filters.iter().filter(|f| !f.is_exclude).count() == 0 {
-            true
-        } else {
-            filters
-                .iter()
-                .filter(|f| !f.is_exclude)
-                .any(|filter| self.matches(filter))
-        };
-
-        is_included
-            && !filters
-                .iter()
-                .filter(|f| f.is_exclude)
-                .any(|filter| self.matches(filter))
-    }
-}
-
-impl AsRef<BackupGroup> for BackupGroup {
-    #[inline]
-    fn as_ref(&self) -> &Self {
-        self
-    }
-}
-
-impl From<(BackupType, String)> for BackupGroup {
-    #[inline]
-    fn from(data: (BackupType, String)) -> Self {
-        Self {
-            ty: data.0,
-            id: data.1,
-        }
-    }
-}
-
-impl std::cmp::Ord for BackupGroup {
-    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        let type_order = self.ty.cmp(&other.ty);
-        if type_order != std::cmp::Ordering::Equal {
-            return type_order;
-        }
-
-        // try to compare IDs numerically
-        let id_self = self.id.parse::<u64>();
-        let id_other = other.id.parse::<u64>();
-        match (id_self, id_other) {
-            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
-            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
-            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
-            _ => self.id.cmp(&other.id),
-        }
-    }
-}
-
-impl std::cmp::PartialOrd for BackupGroup {
-    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl fmt::Display for BackupGroup {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}/{}", self.ty, self.id)
-    }
-}
-
-impl std::str::FromStr for BackupGroup {
-    type Err = Error;
-
-    /// Parse a backup group.
-    ///
-    /// This parses strings like `vm/100`.
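The removed `Ord` impl above compares backup IDs numerically whenever both parse as integers, so `vm/9` sorts before `vm/100`; purely numeric IDs sort before non-numeric ones. The ID comparison in isolation:

```rust
/// Sketch of the ID ordering used by BackupGroup's Ord impl above.
fn id_cmp(a: &str, b: &str) -> std::cmp::Ordering {
    match (a.parse::<u64>(), b.parse::<u64>()) {
        (Ok(a), Ok(b)) => a.cmp(&b),
        (Ok(_), Err(_)) => std::cmp::Ordering::Less,
        (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
        _ => a.cmp(b),
    }
}

fn main() {
    let mut ids = ["100", "9", "elsa"];
    ids.sort_by(|a, b| id_cmp(a, b));
    // a plain string compare would yield ["100", "9", "elsa"]
    assert_eq!(ids, ["9", "100", "elsa"]);
}
```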
    fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = GROUP_PATH_REGEX
-            .captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
-
-        Ok(Self {
-            ty: cap.get(1).unwrap().as_str().parse()?,
-            id: cap.get(2).unwrap().as_str().to_owned(),
-        })
-    }
-}
-
-#[api(
-    properties: {
-        "group": { type: BackupGroup },
-        "backup-time": { schema: BACKUP_TIME_SCHEMA },
-    },
-)]
-/// Uniquely identify a Backup (relative to data store)
-///
-/// We also call this a backup snapshot.
-#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
-#[serde(rename_all = "kebab-case")]
-pub struct BackupDir {
-    /// Backup group.
-    #[serde(flatten)]
-    pub group: BackupGroup,
-
-    /// Backup timestamp unix epoch.
-    #[serde(rename = "backup-time")]
-    pub time: i64,
-}
-
-impl AsRef<BackupGroup> for BackupDir {
-    #[inline]
-    fn as_ref(&self) -> &BackupGroup {
-        &self.group
-    }
-}
-
-impl AsRef<BackupDir> for BackupDir {
-    #[inline]
-    fn as_ref(&self) -> &Self {
-        self
-    }
-}
-
-impl From<(BackupGroup, i64)> for BackupDir {
-    fn from(data: (BackupGroup, i64)) -> Self {
-        Self {
-            group: data.0,
-            time: data.1,
-        }
-    }
-}
-
-impl From<(BackupType, String, i64)> for BackupDir {
-    fn from(data: (BackupType, String, i64)) -> Self {
-        Self {
-            group: (data.0, data.1).into(),
-            time: data.2,
-        }
-    }
-}
-
-impl BackupDir {
-    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
-    where
-        T: Into<String>,
-    {
-        let time = proxmox_time::parse_rfc3339(backup_time_string)?;
-        let group = BackupGroup::new(ty, id.into());
-        Ok(Self { group, time })
-    }
-
-    #[inline]
-    pub fn ty(&self) -> BackupType {
-        self.group.ty
-    }
-
-    #[inline]
-    pub fn id(&self) -> &str {
-        &self.group.id
-    }
-}
-
-impl std::str::FromStr for BackupDir {
-    type Err = Error;
-
-    /// Parse a snapshot path.
-    ///
-    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
-    fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = SNAPSHOT_PATH_REGEX
-            .captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
-
-        BackupDir::with_rfc3339(
-            cap.get(1).unwrap().as_str().parse()?,
-            cap.get(2).unwrap().as_str(),
-            cap.get(3).unwrap().as_str(),
-        )
-    }
-}
-
-impl fmt::Display for BackupDir {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // FIXME: log error?
-        let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
-        write!(f, "{}/{}", self.group, time)
-    }
-}
-
-/// Used when either a backup group or a snapshot directory can be valid.
-pub enum BackupPart {
-    Group(BackupGroup),
-    Dir(BackupDir),
-}
-
-impl std::str::FromStr for BackupPart {
-    type Err = Error;
-
-    /// Parse a path which can be either a backup group or a snapshot dir.
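`BackupDir::from_str` above accepts paths shaped like `<type>/<id>/<rfc3339-time>`, e.g. `host/elsa/2020-06-15T05:18:33Z`. A sketch of just the splitting step (the real code matches SNAPSHOT_PATH_REGEX and converts the timestamp to a unix epoch via proxmox_time; here the timestamp is only split off, not validated):

```rust
/// Sketch: split "<type>/<id>/<time>" into its three parts.
fn split_snapshot_path(path: &str) -> Result<(&str, &str, &str), String> {
    let mut parts = path.splitn(3, '/');
    if let (Some(ty), Some(id), Some(time)) = (parts.next(), parts.next(), parts.next()) {
        if matches!(ty, "vm" | "ct" | "host") && !id.is_empty() && !time.is_empty() {
            return Ok((ty, id, time));
        }
    }
    Err(format!("unable to parse backup snapshot path '{path}'"))
}

fn main() {
    let parsed = split_snapshot_path("host/elsa/2020-06-15T05:18:33Z").unwrap();
    assert_eq!(parsed, ("host", "elsa", "2020-06-15T05:18:33Z"));
    assert!(split_snapshot_path("host/elsa").is_err());
}
```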
- fn from_str(path: &str) -> Result { - let cap = GROUP_OR_SNAPSHOT_PATH_REGEX - .captures(path) - .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; - - let ty = cap.get(1).unwrap().as_str().parse()?; - let id = cap.get(2).unwrap().as_str().to_string(); - - Ok(match cap.get(3) { - Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?), - None => BackupPart::Group((ty, id).into()), - }) - } -} - -#[api( - properties: { - "backup": { type: BackupDir }, - comment: { - schema: SINGLE_LINE_COMMENT_SCHEMA, - optional: true, - }, - verification: { - type: SnapshotVerifyState, - optional: true, - }, - fingerprint: { - type: String, - optional: true, - }, - files: { - items: { - schema: BACKUP_ARCHIVE_NAME_SCHEMA - }, - }, - owner: { - type: Authid, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Basic information about backup snapshot. -pub struct SnapshotListItem { - #[serde(flatten)] - pub backup: BackupDir, - /// The first line from manifest "notes" - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, - /// The result of the last run verify task - #[serde(skip_serializing_if = "Option::is_none")] - pub verification: Option, - /// Fingerprint of encryption key - #[serde(skip_serializing_if = "Option::is_none")] - pub fingerprint: Option, - /// List of contained archive files. - pub files: Vec, - /// Overall snapshot size (sum of all archive sizes). - #[serde(skip_serializing_if = "Option::is_none")] - pub size: Option, - /// The owner of the snapshots group - #[serde(skip_serializing_if = "Option::is_none")] - pub owner: Option, - /// Protection from prunes - #[serde(default)] - pub protected: bool, -} - -#[api( - properties: { - "backup": { type: BackupGroup }, - "last-backup": { schema: BACKUP_TIME_SCHEMA }, - "backup-count": { - type: Integer, - }, - files: { - items: { - schema: BACKUP_ARCHIVE_NAME_SCHEMA - }, - }, - owner: { - type: Authid, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Basic information about a backup group. -pub struct GroupListItem { - #[serde(flatten)] - pub backup: BackupGroup, - - pub last_backup: i64, - /// Number of contained snapshots - pub backup_count: u64, - /// List of contained archive files. - pub files: Vec, - /// The owner of group - #[serde(skip_serializing_if = "Option::is_none")] - pub owner: Option, - /// The first line from group "notes" - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, -} - -#[api()] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Basic information about a backup namespace. -pub struct NamespaceListItem { - /// A backup namespace - pub ns: BackupNamespace, - - // TODO? - //pub group_count: u64, - //pub ns_count: u64, - /// The first line from the namespace's "notes" - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, -} - -#[api( - properties: { - "backup": { type: BackupDir }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Prune result. 
-pub struct PruneListItem { - #[serde(flatten)] - pub backup: BackupDir, - - /// Keep snapshot - pub keep: bool, -} - -#[api( - properties: { - ct: { - type: TypeCounts, - optional: true, - }, - host: { - type: TypeCounts, - optional: true, - }, - vm: { - type: TypeCounts, - optional: true, - }, - other: { - type: TypeCounts, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize, Default)] -/// Counts of groups/snapshots per BackupType. -pub struct Counts { - /// The counts for CT backups - pub ct: Option, - /// The counts for Host backups - pub host: Option, - /// The counts for VM backups - pub vm: Option, - /// The counts for other backup types - pub other: Option, -} - -#[api()] -#[derive(Serialize, Deserialize, Default)] -/// Backup Type group/snapshot counts. -pub struct TypeCounts { - /// The number of groups of the type. - pub groups: u64, - /// The number of snapshots of the type. - pub snapshots: u64, -} - -#[api( - properties: { - "upid": { - optional: true, - type: UPID, - }, - }, -)] -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Garbage collection status. -pub struct GarbageCollectionStatus { - pub upid: Option, - /// Number of processed index files. - pub index_file_count: usize, - /// Sum of bytes referred by index files. - pub index_data_bytes: u64, - /// Bytes used on disk. - pub disk_bytes: u64, - /// Chunks used on disk. - pub disk_chunks: usize, - /// Sum of removed bytes. - pub removed_bytes: u64, - /// Number of removed chunks. - pub removed_chunks: usize, - /// Sum of pending bytes (pending removal - kept for safety). - pub pending_bytes: u64, - /// Number of pending chunks (pending removal - kept for safety). - pub pending_chunks: usize, - /// Number of chunks marked as .bad by verify that have been removed by GC. - pub removed_bad: usize, - /// Number of chunks still marked as .bad after garbage collection. - pub still_bad: usize, -} - -#[api( - properties: { - "status": { - type: GarbageCollectionStatus, - }, - } -)] -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Garbage Collection general info -pub struct GarbageCollectionJobStatus { - /// Datastore - pub store: String, - #[serde(flatten)] - pub status: GarbageCollectionStatus, - /// Schedule of the gc job - #[serde(skip_serializing_if = "Option::is_none")] - pub schedule: Option, - /// Time of the next gc run - #[serde(skip_serializing_if = "Option::is_none")] - pub next_run: Option, - /// Endtime of the last gc run - #[serde(skip_serializing_if = "Option::is_none")] - pub last_run_endtime: Option, - /// State of the last gc run - #[serde(skip_serializing_if = "Option::is_none")] - pub last_run_state: Option, - /// Duration of last gc run - #[serde(skip_serializing_if = "Option::is_none")] - pub duration: Option, -} - -#[api( - properties: { - "gc-status": { - type: GarbageCollectionStatus, - optional: true, - }, - counts: { - type: Counts, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Overall Datastore status and useful information. -pub struct DataStoreStatus { - /// Total space (bytes). - pub total: u64, - /// Used space (bytes). - pub used: u64, - /// Available space (bytes). 
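A practical use of the GC counters above: relating `index_data_bytes` (logically referenced) to `disk_bytes` (actually stored) yields a deduplication factor. The exact formula here is my assumption about how consumers derive it, not something stated in this diff:

```rust
/// Assumed derivation: referenced bytes / stored bytes.
fn dedup_factor(index_data_bytes: u64, disk_bytes: u64) -> f64 {
    if disk_bytes == 0 {
        1.0
    } else {
        index_data_bytes as f64 / disk_bytes as f64
    }
}

fn main() {
    // 400 GiB referenced by indexes, 100 GiB of unique chunks on disk:
    let factor = dedup_factor(400u64 << 30, 100u64 << 30);
    assert!((factor - 4.0).abs() < f64::EPSILON);
}
```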
- pub avail: u64,
- /// Status of last GC
- #[serde(skip_serializing_if = "Option::is_none")]
- pub gc_status: Option<GarbageCollectionStatus>,
- /// Group/Snapshot counts
- #[serde(skip_serializing_if = "Option::is_none")]
- pub counts: Option<Counts>,
-}
-
-#[api(
- properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "mount-status": {
- type: DataStoreMountStatus,
- },
- history: {
- type: Array,
- optional: true,
- items: {
- type: Number,
- description: "Usage at a point in the past. Either null or a fraction between 0.0 and 1.0.",
- }
- },
- },
-)]
-#[derive(Serialize, Deserialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Status of a Datastore
-pub struct DataStoreStatusListItem {
- pub store: String,
- /// The size of the underlying storage in bytes.
- #[serde(skip_serializing_if = "Option::is_none")]
- pub total: Option<u64>,
- /// The used bytes of the underlying storage.
- #[serde(skip_serializing_if = "Option::is_none")]
- pub used: Option<u64>,
- /// The available bytes of the underlying storage. (-1 on error)
- #[serde(skip_serializing_if = "Option::is_none")]
- pub avail: Option<u64>,
- #[serde(default)]
- pub mount_status: DataStoreMountStatus,
- /// A list of usage values from the past month.
- #[serde(skip_serializing_if = "Option::is_none")]
- pub history: Option<Vec<Option<f64>>>,
- /// History start time (epoch)
- #[serde(skip_serializing_if = "Option::is_none")]
- pub history_start: Option<i64>,
- /// History resolution (seconds)
- #[serde(skip_serializing_if = "Option::is_none")]
- pub history_delta: Option<i64>,
- /// Estimation of the UNIX epoch when the storage will be full.
- /// It's calculated via a simple Linear Regression (Least Squares) over the RRD data of the
- /// last month. Missing if not enough data points are available yet. An estimate in the past
- /// means that usage is declining or not changing.
- #[serde(skip_serializing_if = "Option::is_none")]
- pub estimated_full_date: Option<i64>,
- /// An error description, for example, when the datastore could not be looked up
- #[serde(skip_serializing_if = "Option::is_none")]
- pub error: Option<String>,
- /// Status of last GC
- #[serde(skip_serializing_if = "Option::is_none")]
- pub gc_status: Option<GarbageCollectionStatus>,
-}
-
-impl DataStoreStatusListItem {
- pub fn empty(store: &str, err: Option<String>, mount_status: DataStoreMountStatus) -> Self {
- DataStoreStatusListItem {
- store: store.to_owned(),
- total: None,
- used: None,
- avail: None,
- mount_status,
- history: None,
- history_start: None,
- history_delta: None,
- estimated_full_date: None,
- error: err,
- gc_status: None,
- }
- }
-}
-
-pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
- optional: false,
- schema: &ArraySchema::new(
- "Returns the list of snapshots.",
- &SnapshotListItem::API_SCHEMA,
- )
- .schema(),
-};
-
-pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
- optional: false,
- schema: &ArraySchema::new(
- "Returns the list of archive files inside a backup snapshot.",
- &BackupContent::API_SCHEMA,
- )
- .schema(),
-};
-
-pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
- optional: false,
- schema: &ArraySchema::new(
- "Returns the list of backup groups.",
- &GroupListItem::API_SCHEMA,
- )
- .schema(),
-};
-
-pub const ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE: ReturnType = ReturnType {
- optional: false,
- schema: &ArraySchema::new(
- "Returns the list of backup namespaces.",
- &NamespaceListItem::API_SCHEMA,
- )
- .schema(),
-};
-
-pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
- optional: false,
- schema: &ArraySchema::new(
- "Returns the list of snapshots and a flag indicating if they are kept or removed.",
- &PruneListItem::API_SCHEMA,
- )
- .schema(),
-};
-
-#[api(
- properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "max-depth": {
- schema: NS_MAX_DEPTH_SCHEMA,
- optional: true,
- },
- },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// A namespace mapping
-pub struct TapeRestoreNamespace {
- /// The source datastore
- pub store: String,
- /// The source namespace. Root namespace if omitted.
- pub source: Option, - /// The target namespace, - #[serde(skip_serializing_if = "Option::is_none")] - pub target: Option, - /// The (optional) recursion depth - #[serde(skip_serializing_if = "Option::is_none")] - pub max_depth: Option, -} - -pub const TAPE_RESTORE_NAMESPACE_SCHEMA: Schema = StringSchema::new("A namespace mapping") - .format(&ApiStringFormat::PropertyString( - &TapeRestoreNamespace::API_SCHEMA, - )) - .schema(); - -/// Parse snapshots in the form 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z' -/// into a [`BackupNamespace`] and [`BackupDir`] -pub fn parse_ns_and_snapshot(input: &str) -> Result<(BackupNamespace, BackupDir), Error> { - match input.rmatch_indices('/').nth(2) { - Some((idx, _)) => { - let ns = BackupNamespace::from_path(&input[..idx])?; - let dir: BackupDir = input[(idx + 1)..].parse()?; - Ok((ns, dir)) - } - None => Ok((BackupNamespace::root(), input.parse()?)), - } -} - -/// Prints a [`BackupNamespace`] and [`BackupDir`] in the form of -/// 'ns/foo/bar/ct/100/1970-01-01T00:00:00Z' -pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String { - if ns.is_root() { - dir.to_string() - } else { - format!("{}/{}", ns.display_as_path(), dir) - } -} - -/// Prints a Datastore name and [`BackupNamespace`] for logs/errors. -pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { - if ns.is_root() { - format!("datastore '{}', root namespace", store) - } else { - format!("datastore '{}', namespace '{}'", store, ns) - } -} - -pub const DELETE_STATS_COUNT_SCHEMA: Schema = - IntegerSchema::new("Number of entities").minimum(0).schema(); - -#[api( - properties: { - "removed-groups": { - schema: DELETE_STATS_COUNT_SCHEMA, - }, - "protected-snapshots": { - schema: DELETE_STATS_COUNT_SCHEMA, - }, - "removed-snapshots": { - schema: DELETE_STATS_COUNT_SCHEMA, - }, - }, -)] -#[derive(Default, Deserialize, Serialize)] -#[serde(rename_all = "kebab-case")] -/// Statistics for removed backup groups -pub struct BackupGroupDeleteStats { - // Count of removed groups - removed_groups: usize, - // Count of protected snapshots, therefore not removed - protected_snapshots: usize, - // Count of deleted snapshots - removed_snapshots: usize, -} - -impl BackupGroupDeleteStats { - pub fn all_removed(&self) -> bool { - self.protected_snapshots == 0 - } - - pub fn removed_groups(&self) -> usize { - self.removed_groups - } - - pub fn removed_snapshots(&self) -> usize { - self.removed_snapshots - } - - pub fn protected_snapshots(&self) -> usize { - self.protected_snapshots - } - - pub fn add(&mut self, rhs: &Self) { - self.removed_groups += rhs.removed_groups; - self.protected_snapshots += rhs.protected_snapshots; - self.removed_snapshots += rhs.removed_snapshots; - } - - pub fn increment_removed_groups(&mut self) { - self.removed_groups += 1; - } - - pub fn increment_removed_snapshots(&mut self) { - self.removed_snapshots += 1; - } - - pub fn increment_protected_snapshots(&mut self) { - self.protected_snapshots += 1; - } -} - -#[derive(Clone, PartialEq, Eq)] -/// Allowed variants of backup archives to be contained in a snapshot's manifest -pub enum ArchiveType { - FixedIndex, - DynamicIndex, - Blob, -} - -impl ArchiveType { - pub fn from_path(archive_name: impl AsRef) -> Result { - let archive_name = archive_name.as_ref(); - let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) { - Some("didx") => ArchiveType::DynamicIndex, - Some("fidx") => ArchiveType::FixedIndex, - Some("blob") => ArchiveType::Blob, - _ => bail!("unknown archive type: 
{archive_name:?}"), - }; - Ok(archive_type) - } - - pub fn extension(&self) -> &'static str { - match self { - ArchiveType::DynamicIndex => "didx", - ArchiveType::FixedIndex => "fidx", - ArchiveType::Blob => "blob", - } - } -} - -#[derive(Clone, PartialEq, Eq)] -/// Name of archive files contained in snapshot's manifest -pub struct BackupArchiveName { - // archive name including the `.fidx`, `.didx` or `.blob` archive type extension - name: String, - // archive type parsed based on given extension - ty: ArchiveType, -} - -pub static MANIFEST_BLOB_NAME: LazyLock = LazyLock::new(|| BackupArchiveName { - name: "index.json.blob".to_string(), - ty: ArchiveType::Blob, -}); - -pub static CATALOG_NAME: LazyLock = LazyLock::new(|| BackupArchiveName { - name: "catalog.pcat1.didx".to_string(), - ty: ArchiveType::DynamicIndex, -}); - -pub static CLIENT_LOG_BLOB_NAME: LazyLock = - LazyLock::new(|| BackupArchiveName { - name: "client.log.blob".to_string(), - ty: ArchiveType::Blob, - }); - -pub static ENCRYPTED_KEY_BLOB_NAME: LazyLock = - LazyLock::new(|| BackupArchiveName { - name: "rsa-encrypted.key.blob".to_string(), - ty: ArchiveType::Blob, - }); - -impl fmt::Display for BackupArchiveName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{name}", name = self.name) - } -} - -serde_plain::derive_deserialize_from_fromstr!(BackupArchiveName, "archive name"); - -impl FromStr for BackupArchiveName { - type Err = Error; - - fn from_str(name: &str) -> Result { - Self::try_from(name) - } -} - -serde_plain::derive_serialize_from_display!(BackupArchiveName); - -impl TryFrom<&str> for BackupArchiveName { - type Error = anyhow::Error; - - fn try_from(value: &str) -> Result { - let (name, ty) = Self::parse_archive_type(value)?; - Ok(Self { name, ty }) - } -} - -impl AsRef for BackupArchiveName { - fn as_ref(&self) -> &str { - &self.name - } -} - -impl BackupArchiveName { - pub fn from_path(path: impl AsRef) -> Result { - let path = path.as_ref(); - if path.as_os_str().as_encoded_bytes().last() == Some(&b'/') { - bail!("invalid archive name, got directory"); - } - let file_name = path - .file_name() - .ok_or_else(|| format_err!("invalid archive name"))?; - let name = file_name - .to_str() - .ok_or_else(|| format_err!("archive name not valid UTF-8"))?; - - Self::try_from(name) - } - - pub fn archive_type(&self) -> ArchiveType { - self.ty.clone() - } - - pub fn ends_with(&self, postfix: &str) -> bool { - self.name.ends_with(postfix) - } - - pub fn has_pxar_filename_extension(&self) -> bool { - self.name.ends_with(".pxar.didx") - || self.name.ends_with(".mpxar.didx") - || self.name.ends_with(".ppxar.didx") - } - - pub fn without_type_extension(&self) -> String { - self.name - .strip_suffix(&format!(".{ext}", ext = self.ty.extension())) - .unwrap() - .into() - } - - fn parse_archive_type(archive_name: &str) -> Result<(String, ArchiveType), Error> { - // Detect archive type via given server archive name type extension, if present - if let Ok(archive_type) = ArchiveType::from_path(archive_name) { - return Ok((archive_name.into(), archive_type)); - } - - // No server archive name type extension in archive name, map based on extension - let archive_type = match Path::new(archive_name) - .extension() - .and_then(|ext| ext.to_str()) - { - Some("pxar") => ArchiveType::DynamicIndex, - Some("mpxar") => ArchiveType::DynamicIndex, - Some("ppxar") => ArchiveType::DynamicIndex, - Some("pcat1") => ArchiveType::DynamicIndex, - Some("img") => ArchiveType::FixedIndex, - Some("conf") => ArchiveType::Blob, - 
Some("json") => ArchiveType::Blob, - Some("key") => ArchiveType::Blob, - Some("log") => ArchiveType::Blob, - _ => bail!("failed to parse archive type for '{archive_name}'"), - }; - - Ok(( - format!("{archive_name}.{ext}", ext = archive_type.extension()), - archive_type, - )) - } -} - -impl ApiType for BackupArchiveName { - const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_invalid_backup_archive_names() { - let invalid_archive_names = [ - "/invalid/", - "/invalid/archive-name.pxar/", - "/invalid/archive-name.pxar.didx/", - "/invalid/..", - "/invalid/archive-name.invalid", - ]; - - for archive_name in invalid_archive_names { - assert!(BackupArchiveName::from_path(archive_name).is_err()); - } - } - - #[test] - fn test_valid_didx_backup_archive_names() { - let valid_archive_names = [ - "/valid/archive-name.pxar", - "/valid/archive-name.pxar.didx", - "/valid/archive-name.mpxar", - "/valid/archive-name.mpxar.didx", - "/valid/archive-name.ppxar", - "/valid/archive-name.ppxar.didx", - "/valid/archive-name.pcat1", - "/valid/archive-name.pcat1.didx", - ]; - - for archive_name in valid_archive_names { - let archive = BackupArchiveName::from_path(archive_name).unwrap(); - assert!(archive.as_ref().ends_with(".didx")); - assert!(archive.archive_type() == ArchiveType::DynamicIndex); - } - } - - #[test] - fn test_valid_fidx_backup_archive_names() { - let valid_archive_names = ["/valid/archive-name.img", "/valid/archive-name.img.fidx"]; - - for archive_name in valid_archive_names { - let archive = BackupArchiveName::from_path(archive_name).unwrap(); - assert!(archive.as_ref() == "archive-name.img.fidx"); - assert!(archive.without_type_extension() == "archive-name.img"); - assert!(archive.archive_type() == ArchiveType::FixedIndex); - } - } - - #[test] - fn test_valid_blob_backup_archive_names() { - let valid_archive_names = [ - "/valid/index.json", - "/valid/index.json.blob", - "/valid/rsa-encrypted.key", - "/valid/rsa-encrypted.key.blob", - "/valid/archive-name.log", - "/valid/archive-name.log.blob", - "/valid/qemu-server.conf", - "/valid/qemu-server.conf.blob", - ]; - - for archive_name in valid_archive_names { - let archive = BackupArchiveName::from_path(archive_name).unwrap(); - assert!(archive.as_ref().ends_with(".blob")); - assert!(archive.archive_type() == ArchiveType::Blob); - } - } -} diff --git a/pbs-api-types/src/file_restore.rs b/pbs-api-types/src/file_restore.rs deleted file mode 100644 index 90657d65..00000000 --- a/pbs-api-types/src/file_restore.rs +++ /dev/null @@ -1,30 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use proxmox_schema::api; - -#[api] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// General status information about a running VM file-restore daemon -pub struct RestoreDaemonStatus { - /// VM uptime in seconds - pub uptime: i64, - /// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is - /// not set, as then the status call will have reset the timer before returning the value - pub timeout: i64, -} - -#[api] -#[derive(Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "kebab-case")] -/// The desired format of the result. 
-pub enum FileRestoreFormat {
- /// Plain file (only works for single files)
- Plain,
- /// PXAR archive
- Pxar,
- /// ZIP archive
- Zip,
- /// TAR archive
- Tar,
-}
diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
deleted file mode 100644
index 04631d92..00000000
--- a/pbs-api-types/src/jobs.rs
+++ /dev/null
@@ -1,844 +0,0 @@
-use std::str::FromStr;
-
-use anyhow::bail;
-use const_format::concatcp;
-use regex::Regex;
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::*;
-
-use crate::{
- Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
- BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
- DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
- PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
-};
-
-const_regex! {
-
- /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
- pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
- /// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
- pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
-}
-
-pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
- .format(&PROXMOX_SAFE_ID_FORMAT)
- .min_length(3)
- .max_length(32)
- .schema();
-
-pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
- .format(&ApiStringFormat::VerifyFn(
- proxmox_time::verify_calendar_event,
- ))
- .type_text("<calendar-event>")
- .schema();
-
-pub const GC_SCHEDULE_SCHEMA: Schema =
- StringSchema::new("Run garbage collection job at specified schedule.")
- .format(&ApiStringFormat::VerifyFn(
- proxmox_time::verify_calendar_event,
- ))
- .type_text("<calendar-event>")
- .schema();
-
-pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
- .format(&ApiStringFormat::VerifyFn(
- proxmox_time::verify_calendar_event,
- ))
- .type_text("<calendar-event>")
- .schema();
-
-pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
- StringSchema::new("Run verify job at specified schedule.")
- .format(&ApiStringFormat::VerifyFn(
- proxmox_time::verify_calendar_event,
- ))
- .type_text("<calendar-event>")
- .schema();
-
-pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
- "Delete vanished backups. This removes the local copy if the remote backup was deleted.",
-)
-.default(false)
-.schema();
-
-#[api(
- properties: {
- "next-run": {
- description: "Estimated time of the next run (UNIX epoch).",
- optional: true,
- type: Integer,
- },
- "last-run-state": {
- description: "Result of the last run.",
- optional: true,
- type: String,
- },
- "last-run-upid": {
- description: "Task UPID of the last run.",
- optional: true,
- type: String,
- },
- "last-run-endtime": {
- description: "Endtime of the last run.",
- optional: true,
- type: Integer,
- },
- }
-)]
-#[derive(Serialize, Deserialize, Default, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Job Scheduling Status
-pub struct JobScheduleStatus {
- #[serde(skip_serializing_if = "Option::is_none")]
- pub next_run: Option<i64>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub last_run_state: Option<String>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub last_run_upid: Option<String>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub last_run_endtime: Option<i64>,
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// When to send notifications
-pub enum Notify {
- /// Never send notification
- Never,
- /// Send notifications for failed and successful jobs
- Always,
- /// Send notifications for failed jobs only
- Error,
-}
-
-#[api(
- properties: {
- gc: {
- type: Notify,
- optional: true,
- },
- verify: {
- type: Notify,
- optional: true,
- },
- sync: {
- type: Notify,
- optional: true,
- },
- prune: {
- type: Notify,
- optional: true,
- },
- },
-)]
-#[derive(Debug, Serialize, Deserialize)]
-/// Datastore notify settings
-pub struct DatastoreNotify {
- /// Garbage collection settings
- #[serde(skip_serializing_if = "Option::is_none")]
- pub gc: Option<Notify>,
- /// Verify job setting
- #[serde(skip_serializing_if = "Option::is_none")]
- pub verify: Option<Notify>,
- /// Sync job setting
- #[serde(skip_serializing_if = "Option::is_none")]
- pub sync: Option<Notify>,
- /// Prune job setting
- #[serde(skip_serializing_if = "Option::is_none")]
- pub prune: Option<Notify>,
-}
-
-pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
- "Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
-)
-.format(&ApiStringFormat::PropertyString(
- &DatastoreNotify::API_SCHEMA,
-))
-.schema();
-
-pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
- "Do not verify backups that are already verified if their verification is not outdated.",
-)
-.default(true)
-.schema();
-
-pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
- IntegerSchema::new("Days after which a verification becomes outdated (0 is deprecated).")
- .minimum(0)
- .schema();
-
-#[api(
- properties: {
- id: {
- schema: JOB_ID_SCHEMA,
- },
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "ignore-verified": {
- optional: true,
- schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
- },
- "outdated-after": {
- optional: true,
- schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
- },
- comment: {
- optional: true,
- schema: SINGLE_LINE_COMMENT_SCHEMA,
- },
- schedule: {
- optional: true,
- schema: VERIFICATION_SCHEDULE_SCHEMA,
- },
- ns: {
- optional: true,
- schema: BACKUP_NAMESPACE_SCHEMA,
- },
- "max-depth": {
- optional: true,
- schema: crate::NS_MAX_DEPTH_SCHEMA,
- },
- }
-)]
-#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Verification Job
-pub struct VerificationJobConfig {
- /// unique ID to address this job
- #[updater(skip)]
- pub id: String,
- /// the datastore ID this verification job affects
- pub store: String,
- #[serde(skip_serializing_if = "Option::is_none")]
- /// if not set to false, check the age of the last snapshot verification to filter
- /// out recent ones, depending on 'outdated_after' configuration.
- pub ignore_verified: Option<bool>,
- #[serde(skip_serializing_if = "Option::is_none")]
- /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
- pub outdated_after: Option<i64>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub comment: Option<String>,
- #[serde(skip_serializing_if = "Option::is_none")]
- /// when to schedule this job in calendar event notation
- pub schedule: Option<String>,
- #[serde(skip_serializing_if = "Option::is_none", default)]
- /// on which backup namespace to run the verification recursively
- pub ns: Option<BackupNamespace>,
- #[serde(skip_serializing_if = "Option::is_none", default)]
- /// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
- /// snapshots on the same level as the passed `ns`, or the datastore root if none.
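As context for the schedule fields here and above: schedules are systemd-style calendar events validated by `proxmox_time::verify_calendar_event`. A sketch with illustrative values:

fn demo() -> Result<(), anyhow::Error> {
    // All of these are accepted as `schedule` values.
    for event in ["daily", "hourly", "mon..fri 02:30", "sat 18:15"] {
        proxmox_time::verify_calendar_event(event)?;
    }
    Ok(())
}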
- pub max_depth: Option, -} - -impl VerificationJobConfig { - pub fn acl_path(&self) -> Vec<&str> { - match self.ns.as_ref() { - Some(ns) => ns.acl_path(&self.store), - None => vec!["datastore", &self.store], - } - } -} - -#[api( - properties: { - config: { - type: VerificationJobConfig, - }, - status: { - type: JobScheduleStatus, - }, - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Status of Verification Job -pub struct VerificationJobStatus { - #[serde(flatten)] - pub config: VerificationJobConfig, - #[serde(flatten)] - pub status: JobScheduleStatus, -} - -#[api( - properties: { - store: { - schema: DATASTORE_SCHEMA, - }, - pool: { - schema: MEDIA_POOL_NAME_SCHEMA, - }, - drive: { - schema: DRIVE_NAME_SCHEMA, - }, - "eject-media": { - description: "Eject media upon job completion.", - type: bool, - optional: true, - }, - "export-media-set": { - description: "Export media set upon job completion.", - type: bool, - optional: true, - }, - "latest-only": { - description: "Backup latest snapshots only.", - type: bool, - optional: true, - }, - "notify-user": { - optional: true, - type: Userid, - }, - "group-filter": { - schema: GROUP_FILTER_LIST_SCHEMA, - optional: true, - }, - ns: { - type: BackupNamespace, - optional: true, - }, - "max-depth": { - schema: crate::NS_MAX_DEPTH_SCHEMA, - optional: true, - }, - } -)] -#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Tape Backup Job Setup -pub struct TapeBackupJobSetup { - pub store: String, - pub pool: String, - pub drive: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub eject_media: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub export_media_set: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub latest_only: Option, - /// Send job email notification to this user - #[serde(skip_serializing_if = "Option::is_none")] - pub notify_user: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub notification_mode: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub group_filter: Option>, - #[serde(skip_serializing_if = "Option::is_none", default)] - pub ns: Option, - #[serde(skip_serializing_if = "Option::is_none", default)] - pub max_depth: Option, -} - -#[api( - properties: { - id: { - schema: JOB_ID_SCHEMA, - }, - setup: { - type: TapeBackupJobSetup, - }, - comment: { - optional: true, - schema: SINGLE_LINE_COMMENT_SCHEMA, - }, - schedule: { - optional: true, - schema: SYNC_SCHEDULE_SCHEMA, - }, - } -)] -#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Tape Backup Job -pub struct TapeBackupJobConfig { - #[updater(skip)] - pub id: String, - #[serde(flatten)] - pub setup: TapeBackupJobSetup, - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub schedule: Option, -} - -#[api( - properties: { - config: { - type: TapeBackupJobConfig, - }, - status: { - type: JobScheduleStatus, - }, - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Status of Tape Backup Job -pub struct TapeBackupJobStatus { - #[serde(flatten)] - pub config: TapeBackupJobConfig, - #[serde(flatten)] - pub status: JobScheduleStatus, - /// Next tape used (best guess) - #[serde(skip_serializing_if = "Option::is_none")] - pub next_media_label: Option, -} - -#[derive(Clone, Debug)] -/// Filter for matching `BackupGroup`s, for use 
with `BackupGroup::filter`.
-pub enum FilterType {
- /// BackupGroup type - either `vm`, `ct`, or `host`.
- BackupType(BackupType),
- /// Full identifier of BackupGroup, including type
- Group(String),
- /// A regular expression matched against the full identifier of the BackupGroup
- Regex(Regex),
-}
-
-impl PartialEq for FilterType {
- fn eq(&self, other: &Self) -> bool {
- match (self, other) {
- (Self::BackupType(a), Self::BackupType(b)) => a == b,
- (Self::Group(a), Self::Group(b)) => a == b,
- (Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(),
- _ => false,
- }
- }
-}
-
-impl std::str::FromStr for FilterType {
- type Err = anyhow::Error;
-
- fn from_str(s: &str) -> Result<Self, Self::Err> {
- Ok(match s.split_once(':') {
- Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string()))?,
- Some(("type", value)) => FilterType::BackupType(value.parse()?),
- Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
- Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
- None => bail!("input doesn't match expected format '<group:GROUP|type:<vm|ct|host>|regex:REGEX>'"),
- })
- }
-}
-
-// used for serializing below, caution!
-impl std::fmt::Display for FilterType {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
- FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
- FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
- }
- }
-}
-
-#[derive(Clone, Debug)]
-pub struct GroupFilter {
- pub is_exclude: bool,
- pub filter_type: FilterType,
-}
-
-impl PartialEq for GroupFilter {
- fn eq(&self, other: &Self) -> bool {
- self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
- }
-}
-
-impl Eq for GroupFilter {}
-
-impl std::str::FromStr for GroupFilter {
- type Err = anyhow::Error;
-
- fn from_str(s: &str) -> Result<Self, Self::Err> {
- let (is_exclude, type_str) = match s.split_once(':') {
- Some(("include", value)) => (false, value),
- Some(("exclude", value)) => (true, value),
- _ => (false, s),
- };
-
- Ok(GroupFilter {
- is_exclude,
- filter_type: type_str.parse()?,
- })
- }
-}
-
-// used for serializing below, caution!
-impl std::fmt::Display for GroupFilter {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- if self.is_exclude {
- f.write_str("exclude:")?;
- }
- std::fmt::Display::fmt(&self.filter_type, f)
- }
-}
-
-proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
-proxmox_serde::forward_serialize_to_display!(GroupFilter);
-
-fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
- GroupFilter::from_str(input).map(|_| ())
-}
-
-pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
- "Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
- .format(&ApiStringFormat::VerifyFn(verify_group_filter))
- .type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
- .schema();
-
-pub const GROUP_FILTER_LIST_SCHEMA: Schema =
- ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
-
-pub const TRANSFER_LAST_SCHEMA: Schema =
- IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
- .minimum(1)
- .schema();
-
-#[api()]
-#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Direction of the sync job, push or pull
-pub enum SyncDirection {
- /// Sync direction pull
- #[default]
- Pull,
- /// Sync direction push
- Push,
-}
-
-impl std::fmt::Display for SyncDirection {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- SyncDirection::Pull => f.write_str("pull"),
- SyncDirection::Push => f.write_str("push"),
- }
- }
-}
-
-pub const RESYNC_CORRUPT_SCHEMA: Schema =
- BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.")
- .schema();
-
-#[api(
- properties: {
- id: {
- schema: JOB_ID_SCHEMA,
- },
- store: {
- schema: DATASTORE_SCHEMA,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
- },
- "owner": {
- type: Authid,
- optional: true,
- },
- remote: {
- schema: REMOTE_ID_SCHEMA,
- optional: true,
- },
- "remote-store": {
- schema: DATASTORE_SCHEMA,
- },
- "remote-ns": {
- type: BackupNamespace,
- optional: true,
- },
- "remove-vanished": {
- schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
- optional: true,
- },
- "max-depth": {
- schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
- optional: true,
- },
- comment: {
- optional: true,
- schema: SINGLE_LINE_COMMENT_SCHEMA,
- },
- limit: {
- type: RateLimitConfig,
- },
- schedule: {
- optional: true,
- schema: SYNC_SCHEDULE_SCHEMA,
- },
- "group-filter": {
- schema: GROUP_FILTER_LIST_SCHEMA,
- optional: true,
- },
- "transfer-last": {
- schema: TRANSFER_LAST_SCHEMA,
- optional: true,
- },
- "resync-corrupt": {
- schema: RESYNC_CORRUPT_SCHEMA,
- optional: true,
- },
- "sync-direction": {
- type: SyncDirection,
- optional: true,
- },
- }
-)]
-#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Sync Job
-pub struct SyncJobConfig {
- #[updater(skip)]
- pub id: String,
- pub store: String,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub ns: Option<BackupNamespace>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub owner: Option<Authid>,
- #[serde(skip_serializing_if = "Option::is_none")]
- /// None implies local sync.
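A sketch of how the filter syntax defined above parses and round-trips (it assumes the `FilterType` and `GroupFilter` types from this hunk):

fn demo() -> Result<(), anyhow::Error> {
    // Plain filters are includes; an "exclude:" prefix inverts them.
    let f: GroupFilter = "type:vm".parse()?;
    assert!(!f.is_exclude);
    let f: GroupFilter = "exclude:regex:^ct/1[0-9]{2}$".parse()?;
    assert!(f.is_exclude);
    // Display is the serialized form, so filters round-trip.
    assert_eq!(f.to_string(), "exclude:regex:^ct/1[0-9]{2}$");
    Ok(())
}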
- pub remote: Option, - pub remote_store: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub remote_ns: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub remove_vanished: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub max_depth: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub schedule: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub group_filter: Option>, - #[serde(flatten)] - pub limit: RateLimitConfig, - #[serde(skip_serializing_if = "Option::is_none")] - pub transfer_last: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub resync_corrupt: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub sync_direction: Option, -} - -impl SyncJobConfig { - pub fn acl_path(&self) -> Vec<&str> { - match self.ns.as_ref() { - Some(ns) => ns.acl_path(&self.store), - None => vec!["datastore", &self.store], - } - } - - pub fn remote_acl_path(&self) -> Option> { - let remote = self.remote.as_ref()?; - match &self.remote_ns { - Some(remote_ns) => Some(remote_ns.remote_acl_path(remote, &self.remote_store)), - None => Some(vec!["remote", remote, &self.remote_store]), - } - } -} - -#[api( - properties: { - config: { - type: SyncJobConfig, - }, - status: { - type: JobScheduleStatus, - }, - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Status of Sync Job -pub struct SyncJobStatus { - #[serde(flatten)] - pub config: SyncJobConfig, - #[serde(flatten)] - pub status: JobScheduleStatus, -} - -/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API -/// call to prune a specific group, where `max-depth` makes no sense. 
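Callers typically combine the two helpers above when checking privileges; a minimal sketch (the function name is illustrative, not part of the removed API):

fn acl_paths(job: &SyncJobConfig) -> (Vec<&str>, Option<Vec<&str>>) {
    // The local ACL path is always present; the remote one only exists
    // when a remote is configured (None means a datastore-local sync).
    (job.acl_path(), job.remote_acl_path())
}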
-#[api( - properties: { - "keep-last": { - schema: crate::PRUNE_SCHEMA_KEEP_LAST, - optional: true, - }, - "keep-hourly": { - schema: crate::PRUNE_SCHEMA_KEEP_HOURLY, - optional: true, - }, - "keep-daily": { - schema: crate::PRUNE_SCHEMA_KEEP_DAILY, - optional: true, - }, - "keep-weekly": { - schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY, - optional: true, - }, - "keep-monthly": { - schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY, - optional: true, - }, - "keep-yearly": { - schema: crate::PRUNE_SCHEMA_KEEP_YEARLY, - optional: true, - }, - } -)] -#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Common pruning options -pub struct KeepOptions { - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_last: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_hourly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_daily: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_weekly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_monthly: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub keep_yearly: Option, -} - -impl KeepOptions { - pub fn keeps_something(&self) -> bool { - self.keep_last.unwrap_or(0) - + self.keep_hourly.unwrap_or(0) - + self.keep_daily.unwrap_or(0) - + self.keep_weekly.unwrap_or(0) - + self.keep_monthly.unwrap_or(0) - + self.keep_yearly.unwrap_or(0) - > 0 - } -} - -#[api( - properties: { - keep: { - type: KeepOptions, - }, - ns: { - type: BackupNamespace, - optional: true, - }, - "max-depth": { - schema: NS_MAX_DEPTH_REDUCED_SCHEMA, - optional: true, - }, - } -)] -#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Common pruning options -pub struct PruneJobOptions { - #[serde(flatten)] - pub keep: KeepOptions, - - /// The (optional) recursion depth - #[serde(skip_serializing_if = "Option::is_none")] - pub max_depth: Option, - - #[serde(skip_serializing_if = "Option::is_none")] - pub ns: Option, -} - -impl PruneJobOptions { - pub fn keeps_something(&self) -> bool { - self.keep.keeps_something() - } - - pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { - match &self.ns { - Some(ns) => ns.acl_path(store), - None => vec!["datastore", store], - } - } -} - -#[api( - properties: { - disable: { - type: Boolean, - optional: true, - default: false, - }, - id: { - schema: JOB_ID_SCHEMA, - }, - store: { - schema: DATASTORE_SCHEMA, - }, - schedule: { - schema: PRUNE_SCHEDULE_SCHEMA, - }, - comment: { - optional: true, - schema: SINGLE_LINE_COMMENT_SCHEMA, - }, - options: { - type: PruneJobOptions, - }, - }, -)] -#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Prune configuration. -pub struct PruneJobConfig { - /// unique ID to address this job - #[updater(skip)] - pub id: String, - - pub store: String, - - /// Disable this job. 
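For reference, `keeps_something` above is the guard against configuring a prune job that would keep nothing at all; a sketch assuming the `KeepOptions` type from this hunk:

fn demo() {
    // No keep-* option set: such a prune configuration keeps nothing.
    assert!(!KeepOptions::default().keeps_something());

    // Keeping the two most recent snapshots per group:
    let keep = KeepOptions {
        keep_last: Some(2),
        ..Default::default()
    };
    assert!(keep.keeps_something());
}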
- #[serde(default, skip_serializing_if = "is_false")]
- #[updater(serde(skip_serializing_if = "Option::is_none"))]
- pub disable: bool,
-
- pub schedule: String,
-
- #[serde(skip_serializing_if = "Option::is_none")]
- pub comment: Option<String>,
-
- #[serde(flatten)]
- pub options: PruneJobOptions,
-}
-
-impl PruneJobConfig {
- pub fn acl_path(&self) -> Vec<&str> {
- self.options.acl_path(&self.store)
- }
-}
-
-fn is_false(b: &bool) -> bool {
- !b
-}
-
-#[api(
- properties: {
- config: {
- type: PruneJobConfig,
- },
- status: {
- type: JobScheduleStatus,
- },
- },
-)]
-#[derive(Serialize, Deserialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Status of prune job
-pub struct PruneJobStatus {
- #[serde(flatten)]
- pub config: PruneJobConfig,
- #[serde(flatten)]
- pub status: JobScheduleStatus,
-}
diff --git a/pbs-api-types/src/key_derivation.rs b/pbs-api-types/src/key_derivation.rs
deleted file mode 100644
index 8d6cbc89..00000000
--- a/pbs-api-types/src/key_derivation.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::api;
-
-use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
-
-#[api(default: "scrypt")]
-#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
-#[serde(rename_all = "lowercase")]
-/// Key derivation function for password protected encryption keys.
-pub enum Kdf {
- /// Do not encrypt the key.
- None,
- /// Encrypt the key with a password using SCrypt.
- Scrypt,
- /// Encrypt the key with a password using PBKDF2.
- PBKDF2,
-}
-
-impl Default for Kdf {
- #[inline]
- fn default() -> Self {
- Kdf::Scrypt
- }
-}
-
-#[api(
- properties: {
- kdf: {
- type: Kdf,
- },
- fingerprint: {
- schema: CERT_FINGERPRINT_SHA256_SCHEMA,
- optional: true,
- },
- },
-)]
-#[derive(Deserialize, Serialize)]
-/// Encryption Key Information
-pub struct KeyInfo {
- /// Path to key (if stored in a file)
- #[serde(skip_serializing_if = "Option::is_none")]
- pub path: Option<String>,
- pub kdf: Kdf,
- /// Key creation time
- pub created: i64,
- /// Key modification time
- pub modified: i64,
- /// Key fingerprint
- #[serde(skip_serializing_if = "Option::is_none")]
- pub fingerprint: Option<String>,
- /// Password hint
- #[serde(skip_serializing_if = "Option::is_none")]
- pub hint: Option<String>,
-}
diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs
deleted file mode 100644
index a3e0407b..00000000
--- a/pbs-api-types/src/ldap.rs
+++ /dev/null
@@ -1,208 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};
-
-use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
-
-#[api()]
-#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
-/// LDAP connection type
-pub enum LdapMode {
- /// Plaintext LDAP connection
- #[serde(rename = "ldap")]
- #[default]
- Ldap,
- /// Secure STARTTLS connection
- #[serde(rename = "ldap+starttls")]
- StartTls,
- /// Secure LDAPS connection
- #[serde(rename = "ldaps")]
- Ldaps,
-}
-
-#[api(
- properties: {
- "realm": {
- schema: REALM_ID_SCHEMA,
- },
- "comment": {
- optional: true,
- schema: SINGLE_LINE_COMMENT_SCHEMA,
- },
- "verify": {
- optional: true,
- default: false,
- },
- "sync-defaults-options": {
- schema: SYNC_DEFAULTS_STRING_SCHEMA,
- optional: true,
- },
- "sync-attributes": {
- schema: SYNC_ATTRIBUTES_SCHEMA,
- optional: true,
- },
- "user-classes" : {
- optional: true,
- schema: USER_CLASSES_SCHEMA,
- },
- "base-dn" : {
- schema: LDAP_DOMAIN_SCHEMA,
- },
- "bind-dn" : {
- schema: LDAP_DOMAIN_SCHEMA,
- optional: true,
} - }, -)] -#[derive(Serialize, Deserialize, Updater, Clone)] -#[serde(rename_all = "kebab-case")] -/// LDAP configuration properties. -pub struct LdapRealmConfig { - #[updater(skip)] - pub realm: String, - /// LDAP server address - pub server1: String, - /// Fallback LDAP server address - #[serde(skip_serializing_if = "Option::is_none")] - pub server2: Option, - /// Port - #[serde(skip_serializing_if = "Option::is_none")] - pub port: Option, - /// Base domain name. Users are searched under this domain using a `subtree search`. - pub base_dn: String, - /// Username attribute. Used to map a ``userid`` to LDAP to an LDAP ``dn``. - pub user_attr: String, - /// Comment - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, - /// Connection security - #[serde(skip_serializing_if = "Option::is_none")] - pub mode: Option, - /// Verify server certificate - #[serde(skip_serializing_if = "Option::is_none")] - pub verify: Option, - /// CA certificate to use for the server. The path can point to - /// either a file, or a directory. If it points to a file, - /// the PEM-formatted X.509 certificate stored at the path - /// will be added as a trusted certificate. - /// If the path points to a directory, - /// the directory replaces the system's default certificate - /// store at `/etc/ssl/certs` - Every file in the directory - /// will be loaded as a trusted certificate. - #[serde(skip_serializing_if = "Option::is_none")] - pub capath: Option, - /// Bind domain to use for looking up users - #[serde(skip_serializing_if = "Option::is_none")] - pub bind_dn: Option, - /// Custom LDAP search filter for user sync - #[serde(skip_serializing_if = "Option::is_none")] - pub filter: Option, - /// Default options for LDAP sync - #[serde(skip_serializing_if = "Option::is_none")] - pub sync_defaults_options: Option, - /// List of attributes to sync from LDAP to user config - #[serde(skip_serializing_if = "Option::is_none")] - pub sync_attributes: Option, - /// User ``objectClass`` classes to sync - #[serde(skip_serializing_if = "Option::is_none")] - pub user_classes: Option, -} - -#[api( - properties: { - "remove-vanished": { - optional: true, - schema: REMOVE_VANISHED_SCHEMA, - }, - }, - -)] -#[derive(Serialize, Deserialize, Updater, Default, Debug)] -#[serde(rename_all = "kebab-case")] -/// Default options for LDAP synchronization runs -pub struct SyncDefaultsOptions { - /// How to handle vanished properties/users - pub remove_vanished: Option, - /// Enable new users after sync - pub enable_new: Option, -} - -#[api()] -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -#[serde(rename_all = "kebab-case")] -/// remove-vanished options -pub enum RemoveVanished { - /// Delete ACLs for vanished users - Acl, - /// Remove vanished users - Entry, - /// Remove vanished properties from users (e.g. email) - Properties, -} - -pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema(); - -pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options") - .format(&ApiStringFormat::PropertyString( - &SyncDefaultsOptions::API_SCHEMA, - )) - .schema(); - -const REMOVE_VANISHED_DESCRIPTION: &str = - "A semicolon-separated list of things to remove when they or the user \ -vanishes during user synchronization. The following values are possible: ``entry`` removes the \ -user when not returned from the sync; ``properties`` removes any \ -properties on existing user that do not appear in the source. 
\ -``acl`` removes ACLs when the user is not returned from the sync."; - -pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION) - .format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY)) - .schema(); - -pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new( - "Array of remove-vanished options", - &RemoveVanished::API_SCHEMA, -) -.min_length(1) -.schema(); - -#[api()] -#[derive(Serialize, Deserialize, Updater, Default, Debug)] -#[serde(rename_all = "kebab-case")] -/// Determine which LDAP attributes should be synced to which user attributes -pub struct SyncAttributes { - /// Name of the LDAP attribute containing the user's email address - pub email: Option, - /// Name of the LDAP attribute containing the user's first name - pub firstname: Option, - /// Name of the LDAP attribute containing the user's last name - pub lastname: Option, -} - -const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \ -which LDAP attributes map to which PBS user field. For example, \ -to map the LDAP attribute ``mail`` to PBS's ``email``, write \ -``email=mail``."; - -pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT) - .format(&ApiStringFormat::PropertyString( - &SyncAttributes::API_SCHEMA, - )) - .schema(); - -pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new( - "Array of user classes", - &StringSchema::new("user class").schema(), -) -.min_length(1) -.schema(); - -const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \ -user synchronization. For instance, if ``user-classes`` is set to ``person,user``, \ -then user synchronization will consider all LDAP entities \ -where ``objectClass: person`` `or` ``objectClass: user``."; - -pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT) - .format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY)) - .default("inetorgperson,posixaccount,person,user") - .schema(); diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs deleted file mode 100644 index acc2fca3..00000000 --- a/pbs-api-types/src/lib.rs +++ /dev/null @@ -1,373 +0,0 @@ -//! Basic API types used by most of the PBS code. 
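Since this module now lives in the standalone pbs-api-types crate that this patch switches to, downstream code keeps the same import paths; an illustrative consumer-side import:

// Unchanged consumer code; the types now come from the external crate:
use pbs_api_types::{Authid, BackupDir, BackupNamespace, DATASTORE_SCHEMA};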
- -use const_format::concatcp; -use serde::{Deserialize, Serialize}; - -pub mod percent_encoding; - -use proxmox_schema::{ - api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema, -}; -use proxmox_time::parse_daily_duration; - -use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR}; - -pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT; -pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX; -pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR; -pub use proxmox_schema::api_types::{ - BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX, -}; -pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX}; -pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX}; -pub use proxmox_schema::api_types::{ - GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX, -}; -pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX}; -pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX}; - -pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX}; -pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX}; -pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX}; -pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX}; -pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX}; -pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX}; - -pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA; -pub use proxmox_schema::api_types::HOSTNAME_SCHEMA; -pub use proxmox_schema::api_types::HOST_PORT_SCHEMA; -pub use proxmox_schema::api_types::HTTP_URL_SCHEMA; -pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA; -pub use proxmox_schema::api_types::NODE_SCHEMA; -pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT; -pub use proxmox_schema::api_types::{ - BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA, -}; -pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT}; -pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA}; -pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA}; -pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA}; -pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT}; -pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA}; - -use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR}; - -// re-export APT API types -pub use proxmox_apt_api_types::{ - APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryFile, - APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, APTStandardRepository, - APTUpdateInfo, APTUpdateOptions, -}; - -#[rustfmt::skip] -pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*"; - -#[rustfmt::skip] -pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)"; - -#[rustfmt::skip] -pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z"; - -#[rustfmt::skip] -pub const BACKUP_NS_RE: &str = - concatcp!("(?:", - "(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR, - ")?"); - -#[rustfmt::skip] -pub const BACKUP_NS_PATH_RE: &str = - concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/"); - 
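These building blocks compose into the full path matchers defined just below; a sketch of what they accept, using the regex crate directly:

fn demo() -> Result<(), anyhow::Error> {
    // A full snapshot path: type, ID, and RFC 3339 timestamp.
    let snapshot = regex::Regex::new(&format!("^{}$", SNAPSHOT_PATH_REGEX_STR))?;
    assert!(snapshot.is_match("vm/100/2023-01-01T00:00:00Z"));

    // A group path is the same without the trailing time component.
    let group_or_snap = regex::Regex::new(&format!("^{}$", GROUP_OR_SNAPSHOT_PATH_REGEX_STR))?;
    assert!(group_or_snap.is_match("host/backup-server"));
    Ok(())
}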
-#[rustfmt::skip] -pub const SNAPSHOT_PATH_REGEX_STR: &str = - concatcp!( - r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")", - ); - -#[rustfmt::skip] -pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str = - concatcp!( - r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?", - ); - -mod acl; -pub use acl::*; - -mod datastore; -pub use datastore::*; - -mod jobs; -pub use jobs::*; - -mod key_derivation; -pub use key_derivation::{Kdf, KeyInfo}; - -mod maintenance; -pub use maintenance::*; - -mod network; -pub use network::*; - -mod node; -pub use node::*; - -pub use proxmox_auth_api::types as userid; -pub use proxmox_auth_api::types::{Authid, Userid}; -pub use proxmox_auth_api::types::{Realm, RealmRef}; -pub use proxmox_auth_api::types::{Tokenname, TokennameRef}; -pub use proxmox_auth_api::types::{Username, UsernameRef}; -pub use proxmox_auth_api::types::{ - PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, -}; - -#[macro_use] -mod user; -pub use user::*; - -pub use proxmox_schema::upid::*; - -mod crypto; -pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint}; - -pub mod file_restore; - -mod openid; -pub use openid::*; - -mod ldap; -pub use ldap::*; - -mod ad; -pub use ad::*; - -mod remote; -pub use remote::*; - -mod pathpatterns; -pub use pathpatterns::*; - -mod tape; -pub use tape::*; - -mod traffic_control; -pub use traffic_control::*; - -mod zfs; -pub use zfs::*; - -mod metrics; -pub use metrics::*; - -mod version; -pub use version::*; - -const_regex! { - // just a rough check - dummy acceptor is used before persisting - pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$"; - - pub BACKUP_REPO_URL_REGEX = concatcp!( - r"^^(?:(?:(", - USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR, - ")@)?(", - DNS_NAME_STR, "|", IPRE_BRACKET_STR, - "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$" - ); - - pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$"); -} - -pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); - -pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX); - -pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat = - ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX); - -pub const DAILY_DURATION_FORMAT: ApiStringFormat = - ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop)); - -pub const SEARCH_DOMAIN_SCHEMA: Schema = - StringSchema::new("Search domain for host-name lookup.").schema(); - -pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.") - .format(&IP_FORMAT) - .schema(); - -pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.") - .format(&IP_FORMAT) - .schema(); - -pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.") - .format(&IP_FORMAT) - .schema(); - -pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema = - StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2") - .format(&OPENSSL_CIPHERS_TLS_FORMAT) - .schema(); - -pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema = - StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3") - .format(&OPENSSL_CIPHERS_TLS_FORMAT) - .schema(); - -pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.") - .format(&PASSWORD_FORMAT) - .min_length(8) - .max_length(64) - .schema(); - -pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.") - 
.format(&PROXMOX_SAFE_ID_FORMAT) - .min_length(2) - .max_length(32) - .schema(); - -pub const SUBSCRIPTION_KEY_SCHEMA: Schema = - StringSchema::new("Proxmox Backup Server subscription key.") - .format(&SUBSCRIPTION_KEY_FORMAT) - .min_length(15) - .max_length(16) - .schema(); - -pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new( - "Prevent changes if current configuration file has different \ - SHA256 digest. This can be used to prevent concurrent \ - modifications.", -) -.format(&PVE_CONFIG_DIGEST_FORMAT) -.schema(); - -/// API schema format definition for repository URLs -pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX); - -// Complex type definitions - -#[api()] -#[derive(Default, Serialize, Deserialize)] -/// Storage space usage information. -pub struct StorageStatus { - /// Total space (bytes). - pub total: u64, - /// Used space (bytes). - pub used: u64, - /// Available space (bytes). - pub avail: u64, -} - -pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") - .format(&SINGLE_LINE_COMMENT_FORMAT) - .min_length(1) - .max_length(64) - .schema(); - -#[api()] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// Node Power command type. -pub enum NodePowerCommand { - /// Restart the server - Reboot, - /// Shutdown the server - Shutdown, -} - -#[api()] -#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// The state (result) of a finished worker task. -pub enum TaskStateType { - /// Ok - OK, - /// Warning - Warning, - /// Error - Error, - /// Unknown - Unknown, -} - -#[api( - properties: { - upid: { schema: UPID::API_SCHEMA }, - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -/// Task properties. -pub struct TaskListItem { - pub upid: String, - /// The node name where the task is running on. 
- pub node: String,
- /// The Unix PID
- pub pid: i64,
- /// The task process start time (used to disambiguate recycled PIDs)
- pub pstart: u64,
- /// The task start time (Epoch)
- pub starttime: i64,
- /// Worker type (arbitrary ASCII string)
- pub worker_type: String,
- /// Worker ID (arbitrary ASCII string)
- pub worker_id: Option<String>,
- /// The authenticated entity who started the task
- pub user: String,
- /// The task end time (Epoch)
- #[serde(skip_serializing_if = "Option::is_none")]
- pub endtime: Option<i64>,
- /// Task end status
- #[serde(skip_serializing_if = "Option::is_none")]
- pub status: Option<String>,
-}
-
-pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
- optional: false,
- schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
-};
-
-#[api]
-#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
-#[serde(rename_all = "lowercase")]
-/// Type of the realm
-pub enum RealmType {
- /// The PAM realm
- Pam,
- /// The PBS realm
- Pbs,
- /// An OpenID Connect realm
- OpenId,
- /// An LDAP realm
- Ldap,
- /// An Active Directory (AD) realm
- Ad,
-}
-
-serde_plain::derive_display_from_serialize!(RealmType);
-serde_plain::derive_fromstr_from_deserialize!(RealmType);
-
-#[api(
- properties: {
- realm: {
- schema: REALM_ID_SCHEMA,
- },
- "type": {
- type: RealmType,
- },
- comment: {
- optional: true,
- schema: SINGLE_LINE_COMMENT_SCHEMA,
- },
- },
-)]
-#[derive(Deserialize, Serialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Basic information about a realm
-pub struct BasicRealmInfo {
- pub realm: String,
- #[serde(rename = "type")]
- pub ty: RealmType,
- /// True if it is the default realm
- #[serde(skip_serializing_if = "Option::is_none")]
- pub default: Option<bool>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub comment: Option<String>,
-}
diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs
deleted file mode 100644
index 3c9aa819..00000000
--- a/pbs-api-types/src/maintenance.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-use anyhow::{bail, Error};
-use serde::{Deserialize, Serialize};
-use std::borrow::Cow;
-
-use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
-
-const_regex! {
- pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
-}
-
-pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);
-
-pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
- StringSchema::new("Message describing the reason for the maintenance.")
- .format(&MAINTENANCE_MESSAGE_FORMAT)
- .max_length(64)
- .schema();
-
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-/// Operation requirements, used when checking for maintenance mode.
-pub enum Operation {
- /// for any read operation like backup restore or RRD metric collection
- Read,
- /// for any write/delete operation, like backup create or GC
- Write,
- /// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
- /// some mutex could be locked (e.g., GC already running?)
- ///
- /// NOTE: one must *not* do any IO operations while only holding this Op state
- Lookup,
- // GarbageCollect or Delete?
-}
-
-#[api]
-#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
-#[serde(rename_all = "kebab-case")]
-/// Maintenance type.
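To make the semantics of the maintenance check concrete, a sketch over the `MaintenanceMode` and `MaintenanceType` definitions that follow, together with the `Operation` enum above:

fn demo() {
    let mode = MaintenanceMode {
        ty: MaintenanceType::ReadOnly,
        message: Some("disk replacement".to_string()),
    };
    // Lookups and reads still pass; writes are rejected while read-only.
    assert!(mode.check(Some(Operation::Lookup)).is_ok());
    assert!(mode.check(Some(Operation::Read)).is_ok());
    assert!(mode.check(Some(Operation::Write)).is_err());
}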
-pub enum MaintenanceType {
-    // TODO:
-    //  - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
-    //    operation, so that one can enable a mode where nothing new can be added but stuff can be
-    //    cleaned
-    /// Only read operations are allowed on the datastore.
-    ReadOnly,
-    /// Neither read nor write operations are allowed on the datastore.
-    Offline,
-    /// The datastore is being deleted.
-    Delete,
-    /// The (removable) datastore is being unmounted.
-    Unmount,
-}
-serde_plain::derive_display_from_serialize!(MaintenanceType);
-serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);
-
-#[api(
-    properties: {
-        type: {
-            type: MaintenanceType,
-        },
-        message: {
-            optional: true,
-            schema: MAINTENANCE_MESSAGE_SCHEMA,
-        }
-    },
-    default_key: "type",
-)]
-#[derive(Deserialize, Serialize)]
-/// Maintenance mode
-pub struct MaintenanceMode {
-    /// Type of maintenance ("read-only" or "offline").
-    #[serde(rename = "type")]
-    pub ty: MaintenanceType,
-
-    /// Reason for maintenance.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub message: Option<String>,
-}
-
-impl MaintenanceMode {
-    /// Used for deciding whether the datastore is cleared from the internal cache
-    pub fn clear_from_cache(&self) -> bool {
-        self.ty == MaintenanceType::Offline
-            || self.ty == MaintenanceType::Delete
-            || self.ty == MaintenanceType::Unmount
-    }
-
-    pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
-        if self.ty == MaintenanceType::Delete {
-            bail!("datastore is being deleted");
-        }
-
-        let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
-            .decode_utf8()
-            .unwrap_or(Cow::Borrowed(""));
-
-        if let Some(Operation::Lookup) = operation {
-            return Ok(());
-        } else if self.ty == MaintenanceType::Unmount {
-            bail!("datastore is being unmounted");
-        } else if self.ty == MaintenanceType::Offline {
-            bail!("offline maintenance mode: {}", message);
-        } else if self.ty == MaintenanceType::ReadOnly {
-            if let Some(Operation::Write) = operation {
-                bail!("read-only maintenance mode: {}", message);
-            }
-        }
-        Ok(())
-    }
-}
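// Usage sketch (editorial note, not part of the patch): gating datastore
// access on the maintenance mode defined above. The mode value here is
// illustrative; the error strings come from MaintenanceMode::check().
fn maintenance_example() {
    let mode = MaintenanceMode {
        ty: MaintenanceType::ReadOnly,
        message: Some("disk replacement".to_string()),
    };
    // Lookup only touches in-memory state and is allowed in read-only mode ...
    assert!(mode.check(Some(Operation::Lookup)).is_ok());
    // ... and so are reads, while writes fail with
    // "read-only maintenance mode: disk replacement".
    assert!(mode.check(Some(Operation::Read)).is_ok());
    assert!(mode.check(Some(Operation::Write)).is_err());
}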
diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs
deleted file mode 100644
index 014e28e4..00000000
--- a/pbs-api-types/src/metrics.rs
+++ /dev/null
@@ -1,255 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use crate::{
-    HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
-};
-use proxmox_schema::{api, Schema, StringSchema, Updater};
-
-pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
-    .schema();
-
-pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
-    .min_length(3)
-    .max_length(32)
-    .default("proxmox")
-    .schema();
-
-pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
-    .min_length(3)
-    .max_length(32)
-    .default("proxmox")
-    .schema();
-
-fn return_true() -> bool {
-    true
-}
-
-fn is_true(b: &bool) -> bool {
-    *b
-}
-
-#[api(
-    properties: {
-        name: {
-            schema: METRIC_SERVER_ID_SCHEMA,
-        },
-        enable: {
-            type: bool,
-            optional: true,
-            default: true,
-        },
-        host: {
-            schema: HOST_PORT_SCHEMA,
-        },
-        mtu: {
-            type: u16,
-            optional: true,
-            default: 1500,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Updater)]
-#[serde(rename_all = "kebab-case")]
-/// InfluxDB Server (UDP)
-pub struct InfluxDbUdp {
-    #[updater(skip)]
-    pub name: String,
-    #[serde(default = "return_true", skip_serializing_if = "is_true")]
-    #[updater(serde(skip_serializing_if = "Option::is_none"))]
-    /// Enables or disables the metrics server
-    pub enable: bool,
-    /// the host + port
-    pub host: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// The MTU
-    pub mtu: Option<u16>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-}
-
-#[api(
-    properties: {
-        name: {
-            schema: METRIC_SERVER_ID_SCHEMA,
-        },
-        enable: {
-            type: bool,
-            optional: true,
-            default: true,
-        },
-        url: {
-            schema: HTTP_URL_SCHEMA,
-        },
-        token: {
-            type: String,
-            optional: true,
-        },
-        bucket: {
-            schema: INFLUXDB_BUCKET_SCHEMA,
-            optional: true,
-        },
-        organization: {
-            schema: INFLUXDB_ORGANIZATION_SCHEMA,
-            optional: true,
-        },
-        "max-body-size": {
-            type: usize,
-            optional: true,
-            default: 25_000_000,
-        },
-        "verify-tls": {
-            type: bool,
-            optional: true,
-            default: true,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Updater)]
-#[serde(rename_all = "kebab-case")]
-/// InfluxDB Server (HTTP(s))
-pub struct InfluxDbHttp {
-    #[updater(skip)]
-    pub name: String,
-    #[serde(default = "return_true", skip_serializing_if = "is_true")]
-    #[updater(serde(skip_serializing_if = "Option::is_none"))]
-    /// Enables or disables the metrics server
-    pub enable: bool,
-    /// The base url of the influxdb server
-    pub url: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// The (optional) API token
-    pub token: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// Named location where time series data is stored
-    pub bucket: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// Workspace for a group of users
-    pub organization: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// The (optional) maximum body size
-    pub max_body_size: Option<usize>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// If true, the certificate will be validated.
-    pub verify_tls: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-}
-
-#[api]
-#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
-/// Type of the metric server
-pub enum MetricServerType {
-    /// InfluxDB HTTP
-    #[serde(rename = "influxdb-http")]
-    InfluxDbHttp,
-    /// InfluxDB UDP
-    #[serde(rename = "influxdb-udp")]
-    InfluxDbUdp,
-}
-
-#[api(
-    properties: {
-        name: {
-            schema: METRIC_SERVER_ID_SCHEMA,
-        },
-        "type": {
-            type: MetricServerType,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-    },
-)]
-#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
-#[serde(rename_all = "kebab-case")]
-/// Basic information about a metric server that's available for all types
-pub struct MetricServerInfo {
-    pub name: String,
-    #[serde(rename = "type")]
-    pub ty: MetricServerType,
-    /// Enables or disables the metrics server
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub enable: Option<bool>,
-    /// The target server
-    pub server: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-#[api(
-    properties: {
-        data: {
-            type: Array,
-            items: {
-                type: MetricDataPoint,
-            }
-        }
-    }
-)]
-/// Return type for the metric API endpoint
-pub struct Metrics {
-    /// List of metric data points, sorted by timestamp
-    pub data: Vec<MetricDataPoint>,
-}
-
-#[api(
-    properties: {
-        id: {
-            type: String,
-        },
-        metric: {
-            type: String,
-        },
-        timestamp: {
-            type: Integer,
-        },
-    },
-)]
-/// Metric data point
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct MetricDataPoint {
-    /// Unique identifier for this metric object, for instance `node/<nodename>`
-    /// or `qemu/<vmid>`.
-    pub id: String,
-
-    /// Name of the metric.
-    pub metric: String,
-
-    /// Time at which this metric was observed
-    pub timestamp: i64,
-
-    #[serde(rename = "type")]
-    pub ty: MetricDataType,
-
-    /// Metric value.
-    pub value: f64,
-}
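// Usage sketch (editorial note, not part of the patch): assembling the
// return value of the metrics endpoint from the types above. The id and
// metric names are made up for illustration.
fn metrics_example() -> Metrics {
    Metrics {
        data: vec![MetricDataPoint {
            id: "node/pbs".to_string(),
            metric: "cpu_avg".to_string(),
            timestamp: 1_700_000_000, // Unix epoch seconds
            ty: MetricDataType::Gauge,
            value: 0.25,
        }],
    }
}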
-#[api]
-/// Type of the metric.
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
-#[serde(rename_all = "lowercase")]
-pub enum MetricDataType {
-    /// gauge.
-    Gauge,
-    /// counter.
-    Counter,
-    /// derive.
-    Derive,
-}
-
-serde_plain::derive_display_from_serialize!(MetricDataType);
-serde_plain::derive_fromstr_from_deserialize!(MetricDataType);
diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs
deleted file mode 100644
index fe083dc6..00000000
--- a/pbs-api-types/src/network.rs
+++ /dev/null
@@ -1,345 +0,0 @@
-use std::fmt;
-
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::*;
-
-use crate::{
-    CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
-    PROXMOX_SAFE_ID_REGEX,
-};
-
-pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
-
-pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
-    .format(&IP_V4_FORMAT)
-    .max_length(15)
-    .schema();
-
-pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
-    .format(&IP_V6_FORMAT)
-    .max_length(39)
-    .schema();
-
-pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
-    .format(&IP_FORMAT)
-    .max_length(39)
-    .schema();
-
-pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
-    .format(&CIDR_V4_FORMAT)
-    .max_length(18)
-    .schema();
-
-pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
-    .format(&CIDR_V6_FORMAT)
-    .max_length(43)
-    .schema();
-
-pub const CIDR_SCHEMA: Schema =
-    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
-        .format(&CIDR_FORMAT)
-        .max_length(43)
-        .schema();
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// Interface configuration method
-pub enum NetworkConfigMethod {
-    /// Configuration is done manually using other tools
-    Manual,
-    /// Define interfaces with statically allocated addresses.
-    Static,
-    /// Obtain an address via DHCP
-    DHCP,
-    /// Define the loopback interface.
-    Loopback,
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-#[repr(u8)]
-/// Linux Bond Mode
-pub enum LinuxBondMode {
-    /// Round-robin policy
-    BalanceRr = 0,
-    /// Active-backup policy
-    ActiveBackup = 1,
-    /// XOR policy
-    BalanceXor = 2,
-    /// Broadcast policy
-    Broadcast = 3,
-    /// IEEE 802.3ad Dynamic link aggregation
-    #[serde(rename = "802.3ad")]
-    Ieee802_3ad = 4,
-    /// Adaptive transmit load balancing
-    BalanceTlb = 5,
-    /// Adaptive load balancing
-    BalanceAlb = 6,
-}
-
-impl fmt::Display for LinuxBondMode {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.write_str(match self {
-            LinuxBondMode::BalanceRr => "balance-rr",
-            LinuxBondMode::ActiveBackup => "active-backup",
-            LinuxBondMode::BalanceXor => "balance-xor",
-            LinuxBondMode::Broadcast => "broadcast",
-            LinuxBondMode::Ieee802_3ad => "802.3ad",
-            LinuxBondMode::BalanceTlb => "balance-tlb",
-            LinuxBondMode::BalanceAlb => "balance-alb",
-        })
-    }
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-#[repr(u8)]
-/// Bond Transmit Hash Policy for LACP (802.3ad)
-pub enum BondXmitHashPolicy {
-    /// Layer 2
-    Layer2 = 0,
-    /// Layer 2+3
-    #[serde(rename = "layer2+3")]
-    Layer2_3 = 1,
-    /// Layer 3+4
-    #[serde(rename = "layer3+4")]
-    Layer3_4 = 2,
-}
-
-impl fmt::Display for BondXmitHashPolicy {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.write_str(match self {
-            BondXmitHashPolicy::Layer2 => "layer2",
-            BondXmitHashPolicy::Layer2_3 => "layer2+3",
-            BondXmitHashPolicy::Layer3_4 => "layer3+4",
-        })
-    }
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// Network interface type
pub enum NetworkInterfaceType {
-    /// Loopback
-    Loopback,
-    /// Physical Ethernet device
-    Eth,
-    /// Linux Bridge
-    Bridge,
-    /// Linux Bond
-    Bond,
-    /// Linux VLAN (eth.10)
-    Vlan,
-    /// Interface Alias (eth:1)
-    Alias,
-    /// Unknown interface type
-    Unknown,
-}
-
-pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
-    .format(&NETWORK_INTERFACE_FORMAT)
-    .min_length(1)
-    .max_length(15) // libc::IFNAMSIZ-1
-    .schema();
-
-pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
-    ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();
-
-pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
-    StringSchema::new("A list of network devices, comma separated.")
-        .format(&ApiStringFormat::PropertyString(
-            &NETWORK_INTERFACE_ARRAY_SCHEMA,
-        ))
-        .schema();
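// Round-trip sketch (editorial note, not part of the patch): the serde
// renames above mean the on-the-wire names differ from the variant names,
// and the Display impls are kept consistent with them.
fn bond_name_example() {
    assert_eq!(LinuxBondMode::ActiveBackup.to_string(), "active-backup");
    assert_eq!(LinuxBondMode::Ieee802_3ad.to_string(), "802.3ad");
    assert_eq!(BondXmitHashPolicy::Layer3_4.to_string(), "layer3+4");
}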
-#[api(
-    properties: {
-        name: {
-            schema: NETWORK_INTERFACE_NAME_SCHEMA,
-        },
-        "type": {
-            type: NetworkInterfaceType,
-        },
-        method: {
-            type: NetworkConfigMethod,
-            optional: true,
-        },
-        method6: {
-            type: NetworkConfigMethod,
-            optional: true,
-        },
-        cidr: {
-            schema: CIDR_V4_SCHEMA,
-            optional: true,
-        },
-        cidr6: {
-            schema: CIDR_V6_SCHEMA,
-            optional: true,
-        },
-        gateway: {
-            schema: IP_V4_SCHEMA,
-            optional: true,
-        },
-        gateway6: {
-            schema: IP_V6_SCHEMA,
-            optional: true,
-        },
-        options: {
-            description: "Option list (inet)",
-            type: Array,
-            items: {
-                description: "Optional attribute line.",
-                type: String,
-            },
-        },
-        options6: {
-            description: "Option list (inet6)",
-            type: Array,
-            items: {
-                description: "Optional attribute line.",
-                type: String,
-            },
-        },
-        comments: {
-            description: "Comments (inet, may span multiple lines)",
-            type: String,
-            optional: true,
-        },
-        comments6: {
-            description: "Comments (inet6, may span multiple lines)",
-            type: String,
-            optional: true,
-        },
-        bridge_ports: {
-            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
-            optional: true,
-        },
-        slaves: {
-            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
-            optional: true,
-        },
-        "vlan-id": {
-            description: "VLAN ID.",
-            type: u16,
-            optional: true,
-        },
-        "vlan-raw-device": {
-            schema: NETWORK_INTERFACE_NAME_SCHEMA,
-            optional: true,
-        },
-        bond_mode: {
-            type: LinuxBondMode,
-            optional: true,
-        },
-        "bond-primary": {
-            schema: NETWORK_INTERFACE_NAME_SCHEMA,
-            optional: true,
-        },
-        bond_xmit_hash_policy: {
-            type: BondXmitHashPolicy,
-            optional: true,
-        },
-    }
-)]
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
-/// Network Interface configuration
-pub struct Interface {
-    /// Autostart interface
-    #[serde(rename = "autostart")]
-    pub autostart: bool,
-    /// Interface is active (UP)
-    pub active: bool,
-    /// Interface name
-    pub name: String,
-    /// Interface type
-    #[serde(rename = "type")]
-    pub interface_type: NetworkInterfaceType,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub method: Option<NetworkConfigMethod>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub method6: Option<NetworkConfigMethod>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// IPv4 address with netmask
-    pub cidr: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// IPv4 gateway
-    pub gateway: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// IPv6 address with netmask
-    pub cidr6: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// IPv6 gateway
-    pub gateway6: Option<String>,
-
-    #[serde(default, skip_serializing_if = "Vec::is_empty")]
-    pub options: Vec<String>,
-    #[serde(default, skip_serializing_if = "Vec::is_empty")]
-    pub options6: Vec<String>,
-
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comments: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comments6: Option<String>,
-
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// Maximum Transmission Unit
-    pub mtu: Option<u64>,
-
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bridge_ports: Option<Vec<String>>,
-    /// Enable bridge vlan support.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bridge_vlan_aware: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    #[serde(rename = "vlan-id")]
-    pub vlan_id: Option<u16>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    #[serde(rename = "vlan-raw-device")]
-    pub vlan_raw_device: Option<String>,
-
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub slaves: Option<Vec<String>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bond_mode: Option<LinuxBondMode>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    #[serde(rename = "bond-primary")]
-    pub bond_primary: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
-}
-
-impl Interface {
-    pub fn new(name: String) -> Self {
-        Self {
-            name,
-            interface_type: NetworkInterfaceType::Unknown,
-            autostart: false,
-            active: false,
-            method: None,
-            method6: None,
-            cidr: None,
-            gateway: None,
-            cidr6: None,
-            gateway6: None,
-            options: Vec::new(),
-            options6: Vec::new(),
-            comments: None,
-            comments6: None,
-            mtu: None,
-            bridge_ports: None,
-            bridge_vlan_aware: None,
-            vlan_id: None,
-            vlan_raw_device: None,
-            slaves: None,
-            bond_mode: None,
-            bond_primary: None,
-            bond_xmit_hash_policy: None,
-        }
-    }
-}
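// Usage sketch (editorial note, not part of the patch): Interface::new()
// yields an inactive, unconfigured entry; callers then fill in whatever the
// parsed network configuration provides. Values are illustrative.
fn interface_example() -> Interface {
    let mut iface = Interface::new("vmbr0".to_string());
    iface.interface_type = NetworkInterfaceType::Bridge;
    iface.method = Some(NetworkConfigMethod::Static);
    iface.cidr = Some("192.0.2.10/24".to_string());
    iface.bridge_ports = Some(vec!["eno1".to_string()]);
    iface
}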
"lowercase")] -/// Holds the Bootmodes -pub struct BootModeInformation { - /// The BootMode, either Efi or Bios - pub mode: BootMode, - /// SecureBoot status - pub secureboot: bool, -} - -#[api] -#[derive(Serialize, Deserialize, Default)] -#[serde(rename_all = "kebab-case")] -/// Information about the CPU -pub struct NodeCpuInformation { - /// The CPU model - pub model: String, - /// The number of CPU sockets - pub sockets: usize, - /// The number of CPU cores (incl. threads) - pub cpus: usize, -} - -#[api( - properties: { - memory: { - type: NodeMemoryCounters, - }, - root: { - type: StorageStatus, - }, - swap: { - type: NodeSwapCounters, - }, - loadavg: { - type: Array, - items: { - type: Number, - description: "the load", - } - }, - cpuinfo: { - type: NodeCpuInformation, - }, - info: { - type: NodeInformation, - } - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// The Node status -pub struct NodeStatus { - pub memory: NodeMemoryCounters, - pub root: StorageStatus, - pub swap: NodeSwapCounters, - /// The current uptime of the server. - pub uptime: u64, - /// Load for 1, 5 and 15 minutes. - pub loadavg: [f64; 3], - /// The current kernel version (NEW struct type). - pub current_kernel: KernelVersionInformation, - /// The current kernel version (LEGACY string type). - pub kversion: String, - /// Total CPU usage since last query. - pub cpu: f64, - /// Total IO wait since last query. - pub wait: f64, - pub cpuinfo: NodeCpuInformation, - pub info: NodeInformation, - /// Current boot mode - pub boot_info: BootModeInformation, -} diff --git a/pbs-api-types/src/openid.rs b/pbs-api-types/src/openid.rs deleted file mode 100644 index 2c95c5c6..00000000 --- a/pbs-api-types/src/openid.rs +++ /dev/null @@ -1,120 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater}; - -use super::{ - GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA, - SINGLE_LINE_COMMENT_SCHEMA, -}; - -pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX); - -pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.") - .format(&OPENID_SCOPE_FORMAT) - .schema(); - -pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema = - ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema(); - -pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat = - ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA); - -pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile"; -pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List") - .format(&OPENID_SCOPE_LIST_FORMAT) - .default(OPENID_DEFAILT_SCOPE_LIST) - .schema(); - -pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX); - -pub const OPENID_ACR_SCHEMA: Schema = - StringSchema::new("OpenID Authentication Context Class Reference.") - .format(&OPENID_ACR_FORMAT) - .schema(); - -pub const OPENID_ACR_ARRAY_SCHEMA: Schema = - ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema(); - -pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat = - ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA); - -pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List") - .format(&OPENID_ACR_LIST_FORMAT) - .schema(); - -pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new( - "Use the value of this attribute/claim as unique user name. 
-    "Use the value of this attribute/claim as unique user name. It \
-    is up to the identity provider to guarantee the uniqueness. The \
-    OpenID specification only guarantees that Subject ('sub') is \
-    unique. Also make sure that the user is not allowed to change that \
-    attribute by himself!",
-)
-.max_length(64)
-.min_length(1)
-.format(&PROXMOX_SAFE_ID_FORMAT)
-.schema();
-
-#[api(
-    properties: {
-        realm: {
-            schema: REALM_ID_SCHEMA,
-        },
-        "client-key": {
-            optional: true,
-        },
-        "scopes": {
-            schema: OPENID_SCOPE_LIST_SCHEMA,
-            optional: true,
-        },
-        "acr-values": {
-            schema: OPENID_ACR_LIST_SCHEMA,
-            optional: true,
-        },
-        prompt: {
-            description: "OpenID Prompt",
-            type: String,
-            format: &PROXMOX_SAFE_ID_FORMAT,
-            optional: true,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        autocreate: {
-            optional: true,
-            default: false,
-        },
-        "username-claim": {
-            schema: OPENID_USERNAME_CLAIM_SCHEMA,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Updater)]
-#[serde(rename_all = "kebab-case")]
-/// OpenID configuration properties.
-pub struct OpenIdRealmConfig {
-    #[updater(skip)]
-    pub realm: String,
-    /// OpenID Issuer Url
-    pub issuer_url: String,
-    /// OpenID Client ID
-    pub client_id: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub scopes: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub acr_values: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub prompt: Option<String>,
-    /// OpenID Client Key
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub client_key: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-    /// Automatically create users if they do not exist.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub autocreate: Option<bool>,
-    #[updater(skip)]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub username_claim: Option<String>,
-}
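// Serialization sketch (editorial note, not part of the patch): kebab-case
// renaming plus skip_serializing_if keeps unset options out of the stored
// config. Assumes serde_json is available; values are illustrative.
fn openid_example() -> Result<(), anyhow::Error> {
    let config = OpenIdRealmConfig {
        realm: "company".to_string(),
        issuer_url: "https://login.example.com".to_string(),
        client_id: "pbs".to_string(),
        // None falls back to the schema default OPENID_DEFAILT_SCOPE_LIST
        scopes: None,
        acr_values: None,
        prompt: None,
        client_key: None,
        comment: None,
        autocreate: Some(true),
        username_claim: None,
    };
    let value = serde_json::to_value(&config)?;
    assert_eq!(value["client-id"], "pbs"); // kebab-case field name
    assert!(value.get("scopes").is_none()); // skipped when unset
    Ok(())
}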
diff --git a/pbs-api-types/src/pathpatterns.rs b/pbs-api-types/src/pathpatterns.rs
deleted file mode 100644
index 505ecc8a..00000000
--- a/pbs-api-types/src/pathpatterns.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema};
-
-use serde::{Deserialize, Serialize};
-
-const_regex! {
-    pub PATH_PATTERN_REGEX = concat!(r"^.+[^\\]$");
-}
-
-pub const PATH_PATTERN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PATH_PATTERN_REGEX);
-
-pub const PATH_PATTERN_SCHEMA: Schema =
-    StringSchema::new("Path or match pattern for matching filenames.")
-        .format(&PATH_PATTERN_FORMAT)
-        .schema();
-
-#[derive(Default, Deserialize, Serialize)]
-/// Path or path pattern for filename matching
-pub struct PathPattern {
-    pattern: String,
-}
-
-impl ApiType for PathPattern {
-    const API_SCHEMA: Schema = PATH_PATTERN_SCHEMA;
-}
-
-impl AsRef<[u8]> for PathPattern {
-    fn as_ref(&self) -> &[u8] {
-        self.pattern.as_bytes()
-    }
-}
diff --git a/pbs-api-types/src/percent_encoding.rs b/pbs-api-types/src/percent_encoding.rs
deleted file mode 100644
index afe011e2..00000000
--- a/pbs-api-types/src/percent_encoding.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use percent_encoding::{utf8_percent_encode, AsciiSet};
-
-/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
-pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f and 7e
-    // The SIMPLE_ENCODE_SET adds space and anything >= 0x7e (7e itself is already included above)
-    .add(0x20)
-    .add(0x7f)
-    // the DEFAULT_ENCODE_SET added:
-    .add(b' ')
-    .add(b'"')
-    .add(b'#')
-    .add(b'<')
-    .add(b'>')
-    .add(b'`')
-    .add(b'?')
-    .add(b'{')
-    .add(b'}');
-
-/// percent encode a url component
-pub fn percent_encode_component(comp: &str) -> String {
-    utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
-}
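// Usage sketch (editorial note, not part of the patch): NON_ALPHANUMERIC
// escapes everything except ASCII letters and digits, so spaces and '/' in
// a component are encoded as well.
fn encode_example() {
    assert_eq!(percent_encode_component("a b/c"), "a%20b%2Fc");
}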
diff --git a/pbs-api-types/src/remote.rs b/pbs-api-types/src/remote.rs
deleted file mode 100644
index 0d5c9701..00000000
--- a/pbs-api-types/src/remote.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use super::*;
-use proxmox_schema::*;
-
-pub const REMOTE_PASSWORD_SCHEMA: Schema =
-    StringSchema::new("Password or auth token for remote host.")
-        .format(&PASSWORD_FORMAT)
-        .min_length(1)
-        .max_length(1024)
-        .schema();
-
-pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
-    StringSchema::new("Password or auth token for remote host (stored as base64 string).")
-        .format(&PASSWORD_FORMAT)
-        .min_length(1)
-        .max_length(1024)
-        .schema();
-
-pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
-    .schema();
-
-#[api(
-    properties: {
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        host: {
-            schema: DNS_NAME_OR_IP_SCHEMA,
-        },
-        port: {
-            optional: true,
-            description: "The (optional) port",
-            type: u16,
-        },
-        "auth-id": {
-            type: Authid,
-        },
-        fingerprint: {
-            optional: true,
-            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Remote configuration properties.
-pub struct RemoteConfig {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-    pub host: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub port: Option<u16>,
-    pub auth_id: Authid,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub fingerprint: Option<String>,
-}
-
-#[api(
-    properties: {
-        name: {
-            schema: REMOTE_ID_SCHEMA,
-        },
-        config: {
-            type: RemoteConfig,
-        },
-        password: {
-            schema: REMOTE_PASSWORD_BASE64_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Remote properties.
-pub struct Remote {
-    pub name: String,
-    // Note: The stored password is base64 encoded
-    #[serde(default, skip_serializing_if = "String::is_empty")]
-    #[serde(with = "proxmox_serde::string_as_base64")]
-    pub password: String,
-    #[serde(flatten)]
-    pub config: RemoteConfig,
-}
-
-#[api(
-    properties: {
-        name: {
-            schema: REMOTE_ID_SCHEMA,
-        },
-        config: {
-            type: RemoteConfig,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Remote properties, without the password.
-pub struct RemoteWithoutPassword {
-    pub name: String,
-    #[serde(flatten)]
-    pub config: RemoteConfig,
-}
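// Serialization sketch (editorial note, not part of the patch): the
// `string_as_base64` helper stores the password base64-encoded, and
// `flatten` inlines the RemoteConfig fields. Assumes serde_json and that
// an Authid parses from "user@realm"; values are illustrative.
fn remote_example() -> Result<(), anyhow::Error> {
    let remote = Remote {
        name: "pbs2".to_string(),
        password: "secret".to_string(),
        config: RemoteConfig {
            comment: None,
            host: "backup.example.com".to_string(),
            port: None,
            auth_id: "sync@pbs".parse()?,
            fingerprint: None,
        },
    };
    let value = serde_json::to_value(&remote)?;
    assert_eq!(value["password"], "c2VjcmV0"); // base64("secret")
    assert_eq!(value["host"], "backup.example.com"); // flattened config
    Ok(())
}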
-", -) -.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA)) -.schema(); - -#[api( - properties: { - name: { - schema: CHANGER_NAME_SCHEMA, - }, - path: { - schema: SCSI_CHANGER_PATH_SCHEMA, - }, - "export-slots": { - schema: EXPORT_SLOT_LIST_SCHEMA, - optional: true, - }, - "eject-before-unload": { - optional: true, - default: false, - } - }, -)] -#[derive(Serialize, Deserialize, Updater)] -#[serde(rename_all = "kebab-case")] -/// SCSI tape changer -pub struct ScsiTapeChanger { - #[updater(skip)] - pub name: String, - pub path: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub export_slots: Option, - #[serde(skip_serializing_if = "Option::is_none")] - /// if set to true, tapes are ejected manually before unloading - pub eject_before_unload: Option, -} - -#[api( - properties: { - config: { - type: ScsiTapeChanger, - }, - info: { - type: OptionalDeviceIdentification, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Changer config with optional device identification attributes -pub struct ChangerListEntry { - #[serde(flatten)] - pub config: ScsiTapeChanger, - #[serde(flatten)] - pub info: OptionalDeviceIdentification, -} - -#[api()] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Mtx Entry Kind -pub enum MtxEntryKind { - /// Drive - Drive, - /// Slot - Slot, - /// Import/Export Slot - ImportExport, -} - -#[api( - properties: { - "entry-kind": { - type: MtxEntryKind, - }, - "label-text": { - schema: MEDIA_LABEL_SCHEMA, - optional: true, - }, - }, -)] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Mtx Status Entry -pub struct MtxStatusEntry { - pub entry_kind: MtxEntryKind, - /// The ID of the slot or drive - pub entry_id: u64, - /// The media label (volume tag) if the slot/drive is full - #[serde(skip_serializing_if = "Option::is_none")] - pub label_text: Option, - /// The slot the drive was loaded from - #[serde(skip_serializing_if = "Option::is_none")] - pub loaded_slot: Option, - /// The current state of the drive - #[serde(skip_serializing_if = "Option::is_none")] - pub state: Option, -} diff --git a/pbs-api-types/src/tape/device.rs b/pbs-api-types/src/tape/device.rs deleted file mode 100644 index ff335cdf..00000000 --- a/pbs-api-types/src/tape/device.rs +++ /dev/null @@ -1,55 +0,0 @@ -use ::serde::{Deserialize, Serialize}; - -use proxmox_schema::api; - -#[api()] -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Optional Device Identification Attributes -pub struct OptionalDeviceIdentification { - /// Vendor (autodetected) - #[serde(skip_serializing_if = "Option::is_none")] - pub vendor: Option, - /// Model (autodetected) - #[serde(skip_serializing_if = "Option::is_none")] - pub model: Option, - /// Serial number (autodetected) - #[serde(skip_serializing_if = "Option::is_none")] - pub serial: Option, -} - -#[api()] -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "kebab-case")] -/// Kind of device -pub enum DeviceKind { - /// Tape changer (Autoloader, Robot) - Changer, - /// Normal SCSI tape device - Tape, -} - -#[api( - properties: { - kind: { - type: DeviceKind, - }, - }, -)] -#[derive(Debug, Serialize, Deserialize)] -/// Tape device information -pub struct TapeDeviceInfo { - pub kind: DeviceKind, - /// Path to the linux device node - pub path: String, - /// Serial number (autodetected) - pub serial: String, - /// Vendor (autodetected) - pub vendor: String, - /// Model (autodetected) - pub model: String, - /// Device 
-    pub major: u32,
-    /// Device minor number
-    pub minor: u32,
-}
diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs
deleted file mode 100644
index e00665cd..00000000
--- a/pbs-api-types/src/tape/drive.rs
+++ /dev/null
@@ -1,350 +0,0 @@
-//! Types for tape drive API
-use anyhow::{bail, Error};
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};
-
-use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
-
-pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
-    .schema();
-
-pub const LTO_DRIVE_PATH_SCHEMA: Schema =
-    StringSchema::new("The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')").schema();
-
-pub const CHANGER_DRIVENUM_SCHEMA: Schema =
-    IntegerSchema::new("Associated changer drive number (requires option changer)")
-        .minimum(0)
-        .maximum(255)
-        .default(0)
-        .schema();
-
-#[api(
-    properties: {
-        name: {
-            schema: DRIVE_NAME_SCHEMA,
-        }
-    }
-)]
-#[derive(Serialize, Deserialize)]
-/// Simulate tape drives (only for test and debug)
-#[serde(rename_all = "kebab-case")]
-pub struct VirtualTapeDrive {
-    pub name: String,
-    /// Path to directory
-    pub path: String,
-    /// Virtual tape size
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub max_size: Option<usize>,
-}
-
-#[api(
-    properties: {
-        name: {
-            schema: DRIVE_NAME_SCHEMA,
-        },
-        path: {
-            schema: LTO_DRIVE_PATH_SCHEMA,
-        },
-        changer: {
-            schema: CHANGER_NAME_SCHEMA,
-            optional: true,
-        },
-        "changer-drivenum": {
-            schema: CHANGER_DRIVENUM_SCHEMA,
-            optional: true,
-        },
-    }
-)]
-#[derive(Serialize, Deserialize, Updater, Clone)]
-#[serde(rename_all = "kebab-case")]
-/// Lto SCSI tape driver
-pub struct LtoTapeDrive {
-    #[updater(skip)]
-    pub name: String,
-    pub path: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub changer: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub changer_drivenum: Option<u64>,
-}
-
-#[api(
-    properties: {
-        config: {
-            type: LtoTapeDrive,
-        },
-        info: {
-            type: OptionalDeviceIdentification,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Drive list entry
-pub struct DriveListEntry {
-    #[serde(flatten)]
-    pub config: LtoTapeDrive,
-    #[serde(flatten)]
-    pub info: OptionalDeviceIdentification,
-    /// the state of the drive if locked
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub state: Option<String>,
-    /// Current device activity
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub activity: Option<DeviceActivity>,
-}
-
-#[api()]
-#[derive(Serialize, Deserialize)]
-/// Medium auxiliary memory attributes (MAM)
-pub struct MamAttribute {
-    /// Attribute id
-    pub id: u16,
-    /// Attribute name
-    pub name: String,
-    /// Attribute value
-    pub value: String,
-}
-
-#[api()]
-#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
-/// The density of a tape medium, derived from the LTO version.
-pub enum TapeDensity {
-    /// Unknown (no media loaded)
-    Unknown,
-    /// LTO1
-    LTO1,
-    /// LTO2
-    LTO2,
-    /// LTO3
-    LTO3,
-    /// LTO4
-    LTO4,
-    /// LTO5
-    LTO5,
-    /// LTO6
-    LTO6,
-    /// LTO7
-    LTO7,
-    /// LTO7M8
-    LTO7M8,
-    /// LTO8
-    LTO8,
-    /// LTO9
-    LTO9,
-}
-
-impl TryFrom<u8> for TapeDensity {
-    type Error = Error;
-
-    fn try_from(value: u8) -> Result<Self, Self::Error> {
-        let density = match value {
-            0x00 => TapeDensity::Unknown,
-            0x40 => TapeDensity::LTO1,
-            0x42 => TapeDensity::LTO2,
-            0x44 => TapeDensity::LTO3,
-            0x46 => TapeDensity::LTO4,
-            0x58 => TapeDensity::LTO5,
-            0x5a => TapeDensity::LTO6,
-            0x5c => TapeDensity::LTO7,
-            0x5d => TapeDensity::LTO7M8,
-            0x5e => TapeDensity::LTO8,
-            0x60 => TapeDensity::LTO9,
-            _ => bail!("unknown tape density code 0x{:02x}", value),
-        };
-        Ok(density)
-    }
-}
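// Usage sketch (editorial note, not part of the patch): mapping the SCSI
// density code reported by a drive to the LTO generation; unknown codes
// are rejected with an error.
fn density_example() -> Result<(), anyhow::Error> {
    assert_eq!(TapeDensity::try_from(0x5eu8)?, TapeDensity::LTO8);
    assert!(TapeDensity::try_from(0x13u8).is_err());
    Ok(())
}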
-#[api(
-    properties: {
-        density: {
-            type: TapeDensity,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Drive/Media status for Lto SCSI drives.
-///
-/// Media related data is optional - only set if there is a medium
-/// loaded.
-pub struct LtoDriveAndMediaStatus {
-    /// Vendor
-    pub vendor: String,
-    /// Product
-    pub product: String,
-    /// Revision
-    pub revision: String,
-    /// Block size (0 is variable size)
-    pub blocksize: u32,
-    /// Compression enabled
-    pub compression: bool,
-    /// Drive buffer mode
-    pub buffer_mode: u8,
-    /// Tape density
-    pub density: TapeDensity,
-    /// Media is write protected
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub write_protect: Option<bool>,
-    /// Tape Alert Flags
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub alert_flags: Option<String>,
-    /// Current file number
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub file_number: Option<u32>,
-    /// Current block number
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub block_number: Option<u32>,
-    /// Medium Manufacture Date (epoch)
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub manufactured: Option<i64>,
-    /// Total Bytes Read in Medium Life
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bytes_read: Option<u64>,
-    /// Total Bytes Written in Medium Life
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub bytes_written: Option<u64>,
-    /// Number of mounts for the current volume (i.e., Thread Count)
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub volume_mounts: Option<u64>,
-    /// Count of the total number of times the medium has passed over
-    /// the head.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub medium_passes: Option<u64>,
-    /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub medium_wearout: Option<f64>,
-    /// Current device activity
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub drive_activity: Option<DeviceActivity>,
-}
-
-#[api()]
-/// Volume statistics from SCSI log page 17h
-#[derive(Default, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-pub struct Lp17VolumeStatistics {
-    /// Volume mounts (thread count)
-    pub volume_mounts: u64,
-    /// Total data sets written
-    pub volume_datasets_written: u64,
-    /// Write retries
-    pub volume_recovered_write_data_errors: u64,
-    /// Total unrecovered write errors
-    pub volume_unrecovered_write_data_errors: u64,
-    /// Total suspended writes
-    pub volume_write_servo_errors: u64,
-    /// Total fatal suspended writes
-    pub volume_unrecovered_write_servo_errors: u64,
-    /// Total datasets read
-    pub volume_datasets_read: u64,
-    /// Total read retries
-    pub volume_recovered_read_errors: u64,
-    /// Total unrecovered read errors
-    pub volume_unrecovered_read_errors: u64,
-    /// Last mount unrecovered write errors
-    pub last_mount_unrecovered_write_errors: u64,
-    /// Last mount unrecovered read errors
-    pub last_mount_unrecovered_read_errors: u64,
-    /// Last mount bytes written
-    pub last_mount_bytes_written: u64,
-    /// Last mount bytes read
-    pub last_mount_bytes_read: u64,
-    /// Lifetime bytes written
-    pub lifetime_bytes_written: u64,
-    /// Lifetime bytes read
-    pub lifetime_bytes_read: u64,
-    /// Last load write compression ratio
-    pub last_load_write_compression_ratio: u64,
-    /// Last load read compression ratio
-    pub last_load_read_compression_ratio: u64,
-    /// Medium mount time
-    pub medium_mount_time: u64,
-    /// Medium ready time
-    pub medium_ready_time: u64,
-    /// Total native capacity
-    pub total_native_capacity: u64,
-    /// Total used native capacity
-    pub total_used_native_capacity: u64,
-    /// Write protect
-    pub write_protect: bool,
-    /// Volume is WORM
-    pub worm: bool,
-    /// Beginning of medium passes
-    pub beginning_of_medium_passes: u64,
-    /// Middle of medium passes
-    pub middle_of_tape_passes: u64,
-    /// Volume serial number
-    pub serial: String,
-}
-
-/// The DT Device Activity from DT Device Status LP page
-#[api]
-#[derive(Copy, Clone, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-pub enum DeviceActivity {
-    /// No activity
-    NoActivity,
-    /// Cleaning
-    Cleaning,
-    /// Loading
-    Loading,
-    /// Unloading
-    Unloading,
-    /// Other unspecified activity
-    Other,
-    /// Reading
-    Reading,
-    /// Writing
-    Writing,
-    /// Locating
-    Locating,
-    /// Rewinding
-    Rewinding,
-    /// Erasing
-    Erasing,
-    /// Formatting
-    Formatting,
-    /// Calibrating
-    Calibrating,
-    /// Other (DT)
-    OtherDT,
-    /// Updating microcode
-    MicrocodeUpdate,
-    /// Reading encrypted data
-    ReadingEncrypted,
-    /// Writing encrypted data
-    WritingEncrypted,
-}
-
-impl TryFrom<u8> for DeviceActivity {
-    type Error = Error;
-
-    fn try_from(value: u8) -> Result<Self, Self::Error> {
-        Ok(match value {
-            0x00 => DeviceActivity::NoActivity,
-            0x01 => DeviceActivity::Cleaning,
-            0x02 => DeviceActivity::Loading,
-            0x03 => DeviceActivity::Unloading,
-            0x04 => DeviceActivity::Other,
-            0x05 => DeviceActivity::Reading,
-            0x06 => DeviceActivity::Writing,
-            0x07 => DeviceActivity::Locating,
-            0x08 => DeviceActivity::Rewinding,
-            0x09 => DeviceActivity::Erasing,
-            0x0A => DeviceActivity::Formatting,
-            0x0B => DeviceActivity::Calibrating,
-            0x0C => DeviceActivity::OtherDT,
-            0x0D => DeviceActivity::MicrocodeUpdate,
-            0x0E => DeviceActivity::ReadingEncrypted,
-            0x0F => DeviceActivity::WritingEncrypted,
-            other => bail!("invalid DT device activity value: {:x}", other),
-        })
-    }
-}
diff --git a/pbs-api-types/src/tape/media.rs b/pbs-api-types/src/tape/media.rs
deleted file mode 100644
index 6227f463..00000000
--- a/pbs-api-types/src/tape/media.rs
+++ /dev/null
@@ -1,179 +0,0 @@
-use ::serde::{Deserialize, Serialize};
-
-use proxmox_schema::*;
-use proxmox_uuid::Uuid;
-
-use crate::{MediaLocation, MediaStatus, UUID_FORMAT};
-
-pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new(
-    "MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).",
-)
-.format(&UUID_FORMAT)
-.schema();
-
-pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
-    .format(&UUID_FORMAT)
-    .schema();
-
-#[api(
-    properties: {
-        "media-set-uuid": {
-            schema: MEDIA_SET_UUID_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Media Set list entry
-pub struct MediaSetListEntry {
-    /// Media set name
-    pub media_set_name: String,
-    pub media_set_uuid: Uuid,
-    /// MediaSet creation time stamp
-    pub media_set_ctime: i64,
-    /// Media Pool
-    pub pool: String,
-}
-
-#[api(
-    properties: {
-        location: {
-            type: MediaLocation,
-        },
-        status: {
-            type: MediaStatus,
-        },
-        uuid: {
-            schema: MEDIA_UUID_SCHEMA,
-        },
-        "media-set-uuid": {
-            schema: MEDIA_SET_UUID_SCHEMA,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Media list entry
-pub struct MediaListEntry {
-    /// Media label text (or Barcode)
-    pub label_text: String,
-    pub uuid: Uuid,
-    /// Creation time stamp
-    pub ctime: i64,
-    pub location: MediaLocation,
-    pub status: MediaStatus,
-    /// Expired flag
-    pub expired: bool,
-    /// Catalog status OK
-    pub catalog: bool,
-    /// Media set name
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub media_set_name: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub media_set_uuid: Option<Uuid>,
-    /// Media set seq_nr
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub seq_nr: Option<u64>,
-    /// MediaSet creation time stamp
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub media_set_ctime: Option<i64>,
-    /// Media Pool
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub pool: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    /// Bytes currently used
-    pub bytes_used: Option<u64>,
-}
-
-#[api(
-    properties: {
-        uuid: {
-            schema: MEDIA_UUID_SCHEMA,
-        },
-        "media-set-uuid": {
-            schema: MEDIA_SET_UUID_SCHEMA,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Media label info
-pub struct MediaIdFlat {
-    /// Unique ID
-    pub uuid: Uuid,
-    /// Media label text (or Barcode)
-    pub label_text: String,
-    /// Creation time stamp
-    pub ctime: i64,
-    // All MediaSet properties are optional here
-    /// MediaSet Pool
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub pool: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub media_set_uuid: Option<Uuid>,
-    /// MediaSet media sequence number
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub seq_nr: Option<u64>,
-    /// MediaSet Creation time stamp
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub media_set_ctime: Option<i64>,
-    /// Encryption key fingerprint
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub encryption_key_fingerprint: Option<String>,
-}
-
-#[api(
-    properties: {
-        uuid: {
-            schema: MEDIA_UUID_SCHEMA,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
"kebab-case")] -/// Label with optional Uuid -pub struct LabelUuidMap { - /// Changer label text (or Barcode) - pub label_text: String, - /// Associated Uuid (if any) - pub uuid: Option, -} - -#[api( - properties: { - uuid: { - schema: MEDIA_UUID_SCHEMA, - }, - "media-set-uuid": { - schema: MEDIA_SET_UUID_SCHEMA, - }, - }, -)] -#[derive(Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "kebab-case")] -/// Media content list entry -pub struct MediaContentEntry { - /// Media label text (or Barcode) - pub label_text: String, - /// Media Uuid - pub uuid: Uuid, - /// Media set name - pub media_set_name: String, - /// Media set uuid - pub media_set_uuid: Uuid, - /// MediaSet Creation time stamp - pub media_set_ctime: i64, - /// Media set seq_nr - pub seq_nr: u64, - /// Media Pool - pub pool: String, - /// Datastore Name - pub store: String, - /// Backup snapshot - pub snapshot: String, - /// Snapshot creation time (epoch) - pub backup_time: i64, -} diff --git a/pbs-api-types/src/tape/media_location.rs b/pbs-api-types/src/tape/media_location.rs deleted file mode 100644 index 608460b5..00000000 --- a/pbs-api-types/src/tape/media_location.rs +++ /dev/null @@ -1,80 +0,0 @@ -use anyhow::{bail, Error}; - -use proxmox_schema::{ApiStringFormat, Schema, StringSchema}; - -use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT}; - -pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.") - .format(&PROXMOX_SAFE_ID_FORMAT) - .min_length(3) - .max_length(32) - .schema(); - -#[derive(Debug, PartialEq, Eq, Clone)] -/// Media location -pub enum MediaLocation { - /// Ready for use (inside tape library) - Online(String), - /// Local available, but need to be mounted (insert into tape - /// drive) - Offline, - /// Media is inside a Vault - Vault(String), -} - -proxmox_serde::forward_deserialize_to_from_str!(MediaLocation); -proxmox_serde::forward_serialize_to_display!(MediaLocation); - -impl proxmox_schema::ApiType for MediaLocation { - const API_SCHEMA: Schema = StringSchema::new( - "Media location (e.g. 'offline', 'online-', 'vault-')", - ) - .format(&ApiStringFormat::VerifyFn(|text| { - let location: MediaLocation = text.parse()?; - match location { - MediaLocation::Online(ref changer) => { - CHANGER_NAME_SCHEMA.parse_simple_value(changer)?; - } - MediaLocation::Vault(ref vault) => { - VAULT_NAME_SCHEMA.parse_simple_value(vault)?; - } - MediaLocation::Offline => { /* OK */ } - } - Ok(()) - })) - .schema(); -} - -impl std::fmt::Display for MediaLocation { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - MediaLocation::Offline => { - write!(f, "offline") - } - MediaLocation::Online(changer) => { - write!(f, "online-{}", changer) - } - MediaLocation::Vault(vault) => { - write!(f, "vault-{}", vault) - } - } - } -} - -impl std::str::FromStr for MediaLocation { - type Err = Error; - - fn from_str(s: &str) -> Result { - if s == "offline" { - return Ok(MediaLocation::Offline); - } - if let Some(changer) = s.strip_prefix("online-") { - return Ok(MediaLocation::Online(changer.to_string())); - } - if let Some(vault) = s.strip_prefix("vault-") { - return Ok(MediaLocation::Vault(vault.to_string())); - } - - bail!("MediaLocation parse error"); - } -} diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs deleted file mode 100644 index c3eacec7..00000000 --- a/pbs-api-types/src/tape/media_pool.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! Types for tape media pool API -//! -//! 
diff --git a/pbs-api-types/src/tape/media_pool.rs b/pbs-api-types/src/tape/media_pool.rs
deleted file mode 100644
index c3eacec7..00000000
--- a/pbs-api-types/src/tape/media_pool.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-//! Types for tape media pool API
-//!
-//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
-//! so we cannot use them directly for the API. Instead, we represent
-//! them as String.
-
-use std::str::FromStr;
-
-use anyhow::Error;
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater};
-
-use proxmox_time::{CalendarEvent, TimeSpan};
-
-use crate::{
-    PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
-    TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
-};
-
-pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(2)
-    .max_length(32)
-    .schema();
-
-pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
-    "Media set naming template (may contain strftime() time format specifications).",
-)
-.format(&SINGLE_LINE_COMMENT_FORMAT)
-.min_length(2)
-.max_length(64)
-.schema();
-
-pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
-    MediaSetPolicy::from_str(s)?;
-    Ok(())
-});
-
-pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema =
-    StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).")
-        .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
-        .schema();
-
-/// Media set allocation policy
-pub enum MediaSetPolicy {
-    /// Try to use the current media set
-    ContinueCurrent,
-    /// Each backup job creates a new media set
-    AlwaysCreate,
-    /// Create a new set when the specified CalendarEvent triggers
-    CreateAt(CalendarEvent),
-}
-
-impl std::str::FromStr for MediaSetPolicy {
-    type Err = Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        if s == "continue" {
-            return Ok(MediaSetPolicy::ContinueCurrent);
-        }
-        if s == "always" {
-            return Ok(MediaSetPolicy::AlwaysCreate);
-        }
-
-        let event = s.parse()?;
-
-        Ok(MediaSetPolicy::CreateAt(event))
-    }
-}
-
-pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
-    RetentionPolicy::from_str(s)?;
-    Ok(())
-});
-
-pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema =
-    StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).")
-        .format(&MEDIA_RETENTION_POLICY_FORMAT)
-        .schema();
-
-/// Media retention Policy
-pub enum RetentionPolicy {
-    /// Always overwrite media
-    OverwriteAlways,
-    /// Protect data for the timespan specified
-    ProtectFor(TimeSpan),
-    /// Never overwrite data
-    KeepForever,
-}
-
-impl std::str::FromStr for RetentionPolicy {
-    type Err = Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        if s == "overwrite" {
-            return Ok(RetentionPolicy::OverwriteAlways);
-        }
-        if s == "keep" {
-            return Ok(RetentionPolicy::KeepForever);
-        }
-
-        let time_span = s.parse()?;
-
-        Ok(RetentionPolicy::ProtectFor(time_span))
-    }
-}
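// Parsing sketch (editorial note, not part of the patch): anything that is
// not one of the fixed keywords is handed to the proxmox_time parsers. The
// calendar-event and time-span strings below are illustrative and assume
// those parsers accept them.
fn policy_example() -> Result<(), anyhow::Error> {
    assert!(matches!(
        "always".parse::<MediaSetPolicy>()?,
        MediaSetPolicy::AlwaysCreate
    ));
    // e.g. start a new media set every Saturday evening:
    assert!(matches!(
        "sat 20:00".parse::<MediaSetPolicy>()?,
        MediaSetPolicy::CreateAt(_)
    ));
    // keep media for one year before allowing overwrites:
    assert!(matches!(
        "365 days".parse::<RetentionPolicy>()?,
        RetentionPolicy::ProtectFor(_)
    ));
    Ok(())
}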
-#[api(
-    properties: {
-        name: {
-            schema: MEDIA_POOL_NAME_SCHEMA,
-        },
-        allocation: {
-            schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
-            optional: true,
-        },
-        retention: {
-            schema: MEDIA_RETENTION_POLICY_SCHEMA,
-            optional: true,
-        },
-        template: {
-            schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
-            optional: true,
-        },
-        encrypt: {
-            schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
-            optional: true,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Updater)]
-/// Media pool configuration
-pub struct MediaPoolConfig {
-    /// The pool name
-    #[updater(skip)]
-    pub name: String,
-    /// Media Set allocation policy
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub allocation: Option<String>,
-    /// Media retention policy
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub retention: Option<String>,
-    /// Media set naming template (default "%c")
-    ///
-    /// The template is UTF8 text, and can include strftime time
-    /// format specifications.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub template: Option<String>,
-    /// Encryption key fingerprint
-    ///
-    /// If set, encrypt all data using the specified key.
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub encrypt: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-}
diff --git a/pbs-api-types/src/tape/media_status.rs b/pbs-api-types/src/tape/media_status.rs
deleted file mode 100644
index fdb4e6a0..00000000
--- a/pbs-api-types/src/tape/media_status.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::api;
-
-#[api()]
-/// Media status
-#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// Media Status
-pub enum MediaStatus {
-    /// Media is ready to be written
-    Writable,
-    /// Media is full (contains data)
-    Full,
-    /// Media is marked as unknown, needs rescan
-    Unknown,
-    /// Media is marked as damaged
-    Damaged,
-    /// Media is marked as retired
-    Retired,
-}
diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs
deleted file mode 100644
index 6a9d56bc..00000000
--- a/pbs-api-types/src/tape/mod.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-//! Types for tape backup API
-
-mod device;
-pub use device::*;
-
-mod changer;
-pub use changer::*;
-
-mod drive;
-pub use drive::*;
-
-mod media_pool;
-pub use media_pool::*;
-
-mod media_status;
-pub use media_status::*;
-
-mod media_location;
-
-pub use media_location::*;
-
-mod media;
-pub use media::*;
-
-use const_format::concatcp;
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
-use proxmox_uuid::Uuid;
-
-use crate::{
-    BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
-    PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
-};
-const_regex! {
-    pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE,")?", SNAPSHOT_PATH_REGEX_STR, r"$");
-}
-
-pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
-
-pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
-    StringSchema::new("Tape encryption key fingerprint (sha256).")
-        .format(&FINGERPRINT_SHA256_FORMAT)
-        .schema();
-
-pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
-    StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time'")
-        .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
-        .type_text("store:[ns/namespace/...]type/id/time")
-        .schema();
-
-#[api(
-    properties: {
-        pool: {
-            schema: MEDIA_POOL_NAME_SCHEMA,
-            optional: true,
-        },
-        "label-text": {
-            schema: MEDIA_LABEL_SCHEMA,
-            optional: true,
-        },
-        "media": {
-            schema: MEDIA_UUID_SCHEMA,
-            optional: true,
-        },
-        "media-set": {
-            schema: MEDIA_SET_UUID_SCHEMA,
-            optional: true,
-        },
-        "backup-type": {
-            type: BackupType,
-            optional: true,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// Content list filter parameters
-pub struct MediaContentListFilter {
-    pub pool: Option<String>,
-    pub label_text: Option<String>,
-    pub media: Option<Uuid>,
-    pub media_set: Option<Uuid>,
-    pub backup_type: Option<BackupType>,
-    pub backup_id: Option<String>,
-}
diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs
deleted file mode 100644
index c68f4637..00000000
--- a/pbs-api-types/src/traffic_control.rs
+++ /dev/null
@@ -1,168 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_human_byte::HumanByte;
-use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater};
-
-use crate::{
-    CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
-};
-
-pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
-    StringSchema::new("Timeframe to specify when the rule is active.")
-        .format(&DAILY_DURATION_FORMAT)
-        .schema();
-
-pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
-    .schema();
-
-#[api(
-    properties: {
-        "rate-in": {
-            type: HumanByte,
-            optional: true,
-        },
-        "burst-in": {
-            type: HumanByte,
-            optional: true,
-        },
-        "rate-out": {
-            type: HumanByte,
-            optional: true,
-        },
-        "burst-out": {
-            type: HumanByte,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Rate Limit Configuration
-pub struct RateLimitConfig {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub rate_in: Option<HumanByte>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub burst_in: Option<HumanByte>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub rate_out: Option<HumanByte>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub burst_out: Option<HumanByte>,
-}
-
-impl RateLimitConfig {
-    pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
-        Self {
-            rate_in: rate,
-            burst_in: burst,
-            rate_out: rate,
-            burst_out: burst,
-        }
-    }
-
-    /// Create a [RateLimitConfig] from a [ClientRateLimitConfig]
-    pub fn from_client_config(limit: ClientRateLimitConfig) -> Self {
-        Self::with_same_inout(limit.rate, limit.burst)
-    }
-}
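// Usage sketch (editorial note, not part of the patch): client-side limits
// only distinguish rate and burst, so from_client_config() applies them
// symmetrically to both traffic directions.
fn limit_example(client: ClientRateLimitConfig) -> RateLimitConfig {
    let limit = RateLimitConfig::from_client_config(client);
    assert!(limit.rate_in == limit.rate_out);
    assert!(limit.burst_in == limit.burst_out);
    limit
}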
diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs
deleted file mode 100644
index c68f4637..00000000
--- a/pbs-api-types/src/traffic_control.rs
+++ /dev/null
@@ -1,168 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_human_byte::HumanByte;
-use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater};
-
-use crate::{
-    CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
-};
-
-pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
-    StringSchema::new("Timeframe to specify when the rule is active.")
-        .format(&DAILY_DURATION_FORMAT)
-        .schema();
-
-pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
-    .schema();
-
-#[api(
-    properties: {
-        "rate-in": {
-            type: HumanByte,
-            optional: true,
-        },
-        "burst-in": {
-            type: HumanByte,
-            optional: true,
-        },
-        "rate-out": {
-            type: HumanByte,
-            optional: true,
-        },
-        "burst-out": {
-            type: HumanByte,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Rate Limit Configuration
-pub struct RateLimitConfig {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub rate_in: Option<HumanByte>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub burst_in: Option<HumanByte>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub rate_out: Option<HumanByte>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub burst_out: Option<HumanByte>,
-}
-
-impl RateLimitConfig {
-    pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
-        Self {
-            rate_in: rate,
-            burst_in: burst,
-            rate_out: rate,
-            burst_out: burst,
-        }
-    }
-
-    /// Create a [RateLimitConfig] from a [ClientRateLimitConfig]
-    pub fn from_client_config(limit: ClientRateLimitConfig) -> Self {
-        Self::with_same_inout(limit.rate, limit.burst)
-    }
-}
-
-const CLIENT_RATE_LIMIT_SCHEMA: Schema = HumanByte::API_SCHEMA
-    .unwrap_string_schema_cloned()
-    .description("Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, GiB, ...).")
-    .schema();
-
-const CLIENT_BURST_SCHEMA: Schema = HumanByte::API_SCHEMA
-    .unwrap_string_schema_cloned()
-    .description("Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, GiB, ...).")
-    .schema();
-
-#[api(
-    properties: {
-        rate: {
-            schema: CLIENT_RATE_LIMIT_SCHEMA,
-            optional: true,
-        },
-        burst: {
-            schema: CLIENT_BURST_SCHEMA,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Default, Clone)]
-#[serde(rename_all = "kebab-case")]
-/// Client Rate Limit Configuration
-pub struct ClientRateLimitConfig {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    rate: Option<HumanByte>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    burst: Option<HumanByte>,
-}
-
-#[api(
-    properties: {
-        name: {
-            schema: TRAFFIC_CONTROL_ID_SCHEMA,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        limit: {
-            type: RateLimitConfig,
-        },
-        network: {
-            type: Array,
-            items: {
-                schema: CIDR_SCHEMA,
-            },
-        },
-        timeframe: {
-            type: Array,
-            items: {
-                schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
-            },
-            optional: true,
-        },
-    },
-)]
-#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
-#[serde(rename_all = "kebab-case")]
-/// Traffic control rule
-pub struct TrafficControlRule {
-    #[updater(skip)]
-    pub name: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-    /// The rule applies to source IPs within these networks
-    pub network: Vec<String>,
-    #[serde(flatten)]
-    pub limit: RateLimitConfig,
-    // fixme: expose this?
-    // /// Bandwidth is shared across all connections
-    // #[serde(skip_serializing_if = "Option::is_none")]
-    // pub shared: Option<bool>,
-    /// Enable the rule at specific times
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub timeframe: Option<Vec<String>>,
-}
-
-#[api(
-    properties: {
-        config: {
-            type: TrafficControlRule,
-        },
-    },
-)]
-#[derive(Clone, Serialize, Deserialize, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// Traffic control rule config with current rates
-pub struct TrafficControlCurrentRate {
-    #[serde(flatten)]
-    pub config: TrafficControlRule,
-    /// Current ingress rate in bytes/second
-    pub cur_rate_in: u64,
-    /// Current egress rate in bytes/second
-    pub cur_rate_out: u64,
-}
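Example (not part of the patch): a short sketch of the two helper constructors on RateLimitConfig. It assumes the published crate keeps the `Option<HumanByte>` signature reconstructed above and that proxmox_human_byte::HumanByte implements FromStr for strings like "100MiB"; both are assumptions, not guaranteed by this diff.

use pbs_api_types::RateLimitConfig;
use proxmox_human_byte::HumanByte;

fn main() {
    // Apply the same limit in both directions: 100 MiB/s rate, 200 MiB bucket.
    let rate: HumanByte = "100MiB".parse().expect("valid size string");
    let burst: HumanByte = "200MiB".parse().expect("valid size string");

    let limit = RateLimitConfig::with_same_inout(Some(rate), Some(burst));

    // with_same_inout mirrors the values into both directions.
    assert!(limit.rate_in == limit.rate_out);
    assert!(limit.burst_in == limit.burst_out);
}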
diff --git a/pbs-api-types/src/user.rs b/pbs-api-types/src/user.rs
deleted file mode 100644
index 42f41266..00000000
--- a/pbs-api-types/src/user.rs
+++ /dev/null
@@ -1,226 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};
-
-use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
-use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
-
-pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
-    "Enable the account (default). You can set this to '0' to disable the account.",
-)
-.default(true)
-.schema();
-
-pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
-    "Account expiration date (seconds since epoch). '0' means no expiration date.",
-)
-.default(0)
-.minimum(0)
-.schema();
-
-pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .min_length(2)
-    .max_length(64)
-    .schema();
-
-pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .min_length(2)
-    .max_length(64)
-    .schema();
-
-pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .min_length(2)
-    .max_length(64)
-    .schema();
-
-#[api(
-    properties: {
-        userid: {
-            type: Userid,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        enable: {
-            optional: true,
-            schema: ENABLE_USER_SCHEMA,
-        },
-        expire: {
-            optional: true,
-            schema: EXPIRE_USER_SCHEMA,
-        },
-        firstname: {
-            optional: true,
-            schema: FIRST_NAME_SCHEMA,
-        },
-        lastname: {
-            schema: LAST_NAME_SCHEMA,
-            optional: true,
-        },
-        email: {
-            schema: EMAIL_SCHEMA,
-            optional: true,
-        },
-        tokens: {
-            type: Array,
-            optional: true,
-            description: "List of user's API tokens.",
-            items: {
-                type: ApiToken
-            },
-        },
-        "totp-locked": {
-            type: bool,
-            optional: true,
-            default: false,
-            description: "True if the user is currently locked out of TOTP factors",
-        },
-        "tfa-locked-until": {
-            optional: true,
-            description: "Contains a timestamp until when a user is locked out of 2nd factors",
-        },
-    }
-)]
-#[derive(Serialize, Deserialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-/// User properties with added list of ApiTokens
-pub struct UserWithTokens {
-    pub userid: Userid,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub enable: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub expire: Option<i64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub firstname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub lastname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub email: Option<String>,
-    #[serde(skip_serializing_if = "Vec::is_empty", default)]
-    pub tokens: Vec<ApiToken>,
-    #[serde(skip_serializing_if = "bool_is_false", default)]
-    pub totp_locked: bool,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub tfa_locked_until: Option<i64>,
-}
-
-fn bool_is_false(b: &bool) -> bool {
-    !b
-}
-
-#[api(
-    properties: {
-        tokenid: {
-            schema: PROXMOX_TOKEN_ID_SCHEMA,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        enable: {
-            optional: true,
-            schema: ENABLE_USER_SCHEMA,
-        },
-        expire: {
-            optional: true,
-            schema: EXPIRE_USER_SCHEMA,
-        },
-    }
-)]
-#[derive(Serialize, Deserialize, Clone, PartialEq)]
-/// ApiToken properties.
-pub struct ApiToken {
-    pub tokenid: Authid,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub enable: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub expire: Option<i64>,
-}
-
-impl ApiToken {
-    pub fn is_active(&self) -> bool {
-        if !self.enable.unwrap_or(true) {
-            return false;
-        }
-        if let Some(expire) = self.expire {
-            let now = proxmox_time::epoch_i64();
-            if expire > 0 && expire <= now {
-                return false;
-            }
-        }
-        true
-    }
-}
-
-#[api(
-    properties: {
-        userid: {
-            type: Userid,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        enable: {
-            optional: true,
-            schema: ENABLE_USER_SCHEMA,
-        },
-        expire: {
-            optional: true,
-            schema: EXPIRE_USER_SCHEMA,
-        },
-        firstname: {
-            optional: true,
-            schema: FIRST_NAME_SCHEMA,
-        },
-        lastname: {
-            schema: LAST_NAME_SCHEMA,
-            optional: true,
-        },
-        email: {
-            schema: EMAIL_SCHEMA,
-            optional: true,
-        },
-    }
-)]
-#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)]
-/// User properties.
-pub struct User {
-    #[updater(skip)]
-    pub userid: Userid,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub enable: Option<bool>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub expire: Option<i64>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub firstname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub lastname: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub email: Option<String>,
-}
-
-impl User {
-    pub fn is_active(&self) -> bool {
-        if !self.enable.unwrap_or(true) {
-            return false;
-        }
-        if let Some(expire) = self.expire {
-            let now = proxmox_time::epoch_i64();
-            if expire > 0 && expire <= now {
-                return false;
-            }
-        }
-        true
-    }
-}
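Example (not part of the patch): both `User` and `ApiToken` share the same activation rule, visible in the deleted impls above: active when `enable` is unset (defaults to true) and `expire` is unset, zero, or still in the future. A minimal sketch, assuming the published crate keeps `is_active()` and `Authid`'s FromStr impl.

use pbs_api_types::ApiToken;

fn main() {
    let mut token = ApiToken {
        tokenid: "root@pam!monitoring".parse().expect("valid token id"),
        comment: None,
        enable: None,    // unset means enabled
        expire: Some(0), // 0 means "no expiration date"
    };
    assert!(token.is_active());

    token.expire = Some(1); // one second after the epoch: long expired
    assert!(!token.is_active());
}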
diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs
deleted file mode 100644
index 09e725eb..00000000
--- a/pbs-api-types/src/version.rs
+++ /dev/null
@@ -1,190 +0,0 @@
-//! Defines the types for the API version info endpoint
-use std::cmp::Ordering;
-use std::convert::TryFrom;
-
-use anyhow::{format_err, Context};
-
-use proxmox_schema::api;
-
-#[api(
-    description: "API version information",
-    properties: {
-        "version": {
-            description: "Version 'major.minor'",
-            type: String,
-        },
-        "release": {
-            description: "Version release",
-            type: String,
-        },
-        "repoid": {
-            description: "Version repository id",
-            type: String,
-        },
-    }
-)]
-#[derive(serde::Deserialize, serde::Serialize)]
-pub struct ApiVersionInfo {
-    pub version: String,
-    pub release: String,
-    pub repoid: String,
-}
-
-pub type ApiVersionMajor = u64;
-pub type ApiVersionMinor = u64;
-pub type ApiVersionRelease = u64;
-
-#[derive(PartialEq, Eq)]
-pub struct ApiVersion {
-    pub major: ApiVersionMajor,
-    pub minor: ApiVersionMinor,
-    pub release: ApiVersionRelease,
-}
-
-impl TryFrom<ApiVersionInfo> for ApiVersion {
-    type Error = anyhow::Error;
-
-    fn try_from(value: ApiVersionInfo) -> Result<Self, Self::Error> {
-        let (major, minor) = value
-            .version
-            .split_once('.')
-            .ok_or_else(|| format_err!("malformed API version {}", value.version))?;
-
-        let major: ApiVersionMajor = major
-            .parse()
-            .with_context(|| "failed to parse major version")?;
-        let minor: ApiVersionMinor = minor
-            .parse()
-            .with_context(|| "failed to parse minor version")?;
-        let release: ApiVersionRelease = value
-            .release
-            .parse()
-            .with_context(|| "failed to parse release version")?;
-
-        Ok(Self {
-            major,
-            minor,
-            release,
-        })
-    }
-}
-
-impl PartialOrd for ApiVersion {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        let ordering = match (
-            self.major.cmp(&other.major),
-            self.minor.cmp(&other.minor),
-            self.release.cmp(&other.release),
-        ) {
-            (Ordering::Equal, Ordering::Equal, ordering) => ordering,
-            (Ordering::Equal, ordering, _) => ordering,
-            (ordering, _, _) => ordering,
-        };
-
-        Some(ordering)
-    }
-}
-
-impl ApiVersion {
-    pub fn new(major: ApiVersionMajor, minor: ApiVersionMinor, release: ApiVersionRelease) -> Self {
-        Self {
-            major,
-            minor,
-            release,
-        }
-    }
-}
-
-#[test]
-fn same_level_version_comparison() {
-    let major_base = ApiVersion::new(2, 0, 0);
-    let major_less = ApiVersion::new(1, 0, 0);
-    let major_greater = ApiVersion::new(3, 0, 0);
-
-    let minor_base = ApiVersion::new(2, 2, 0);
-    let minor_less = ApiVersion::new(2, 1, 0);
-    let minor_greater = ApiVersion::new(2, 3, 0);
-
-    let release_base = ApiVersion::new(2, 2, 2);
-    let release_less = ApiVersion::new(2, 2, 1);
-    let release_greater = ApiVersion::new(2, 2, 3);
-
-    assert!(major_base == major_base);
-    assert!(minor_base == minor_base);
-    assert!(release_base == release_base);
-
-    assert!(major_base > major_less);
-    assert!(major_base >= major_less);
-    assert!(major_base != major_less);
-
-    assert!(major_base < major_greater);
-    assert!(major_base <= major_greater);
-    assert!(major_base != major_greater);
-
-    assert!(minor_base > minor_less);
-    assert!(minor_base >= minor_less);
-    assert!(minor_base != minor_less);
-
-    assert!(minor_base < minor_greater);
-    assert!(minor_base <= minor_greater);
-    assert!(minor_base != minor_greater);
-
-    assert!(release_base > release_less);
-    assert!(release_base >= release_less);
-    assert!(release_base != release_less);
-
-    assert!(release_base < release_greater);
-    assert!(release_base <= release_greater);
-    assert!(release_base != release_greater);
-}
-
-#[test]
-fn mixed_level_version_comparison() {
-    let major_base = ApiVersion::new(2, 0, 0);
-    let major_less = ApiVersion::new(1, 0, 0);
-    let major_greater = ApiVersion::new(3, 0, 0);
-
-    let minor_base = ApiVersion::new(2, 2, 0);
-    let minor_less = ApiVersion::new(2, 1, 0);
-    let minor_greater = ApiVersion::new(2, 3, 0);
-
-    let release_base = ApiVersion::new(2, 2, 2);
-    let release_less = ApiVersion::new(2, 2, 1);
-    let release_greater = ApiVersion::new(2, 2, 3);
-
-    assert!(major_base < minor_base);
-    assert!(major_base < minor_less);
-    assert!(major_base < minor_greater);
-
-    assert!(major_base < release_base);
-    assert!(major_base < release_less);
-    assert!(major_base < release_greater);
-
-    assert!(major_less < minor_base);
-    assert!(major_less < minor_less);
-    assert!(major_less < minor_greater);
-
-    assert!(major_less < release_base);
-    assert!(major_less < release_less);
-    assert!(major_less < release_greater);
-
-    assert!(major_greater > minor_base);
-    assert!(major_greater > minor_less);
-    assert!(major_greater > minor_greater);
-
-    assert!(major_greater > release_base);
-    assert!(major_greater > release_less);
-    assert!(major_greater > release_greater);
-
-    assert!(minor_base < release_base);
-    assert!(minor_base < release_less);
-    assert!(minor_base < release_greater);
-
-    assert!(minor_greater > release_base);
-    assert!(minor_greater > release_less);
-    assert!(minor_greater > release_greater);
-
-    assert!(minor_less < release_base);
-    assert!(minor_less < release_less);
-    assert!(minor_less < release_greater);
-}
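Example (not part of the patch): a sketch of how a client can gate features on the server version, assuming the published crate keeps the `TryFrom<ApiVersionInfo>` impl and the major-then-minor-then-release ordering shown above, and that anyhow is available.

use std::convert::TryFrom;

use pbs_api_types::{ApiVersion, ApiVersionInfo};

fn main() -> Result<(), anyhow::Error> {
    // Shaped like the response of the version API endpoint.
    let info = ApiVersionInfo {
        version: "3.2".to_string(),
        release: "5".to_string(),
        repoid: "0123456789abcdef".to_string(),
    };
    let version = ApiVersion::try_from(info)?;

    // Major is compared first, then minor, then release.
    assert!(version >= ApiVersion::new(3, 1, 0));
    assert!(version < ApiVersion::new(4, 0, 0));
    Ok(())
}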
diff --git a/pbs-api-types/src/zfs.rs b/pbs-api-types/src/zfs.rs
deleted file mode 100644
index 57fa5cf4..00000000
--- a/pbs-api-types/src/zfs.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::*;
-
-const_regex! {
-    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
-}
-
-pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
-    .minimum(9)
-    .maximum(16)
-    .default(12)
-    .schema();
-
-pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
-    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
-    .schema();
-
-#[api(default: "On")]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// The ZFS compression algorithm to use.
-pub enum ZfsCompressionType {
-    /// Gnu Zip
-    Gzip,
-    /// LZ4
-    Lz4,
-    /// LZJB
-    Lzjb,
-    /// ZLE
-    Zle,
-    /// ZStd
-    ZStd,
-    /// Enable compression using the default algorithm.
-    On,
-    /// Disable compression.
-    Off,
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// The ZFS RAID level to use.
-pub enum ZfsRaidLevel {
-    /// Single Disk
-    Single,
-    /// Mirror
-    Mirror,
-    /// Raid10
-    Raid10,
-    /// RaidZ
-    RaidZ,
-    /// RaidZ2
-    RaidZ2,
-    /// RaidZ3
-    RaidZ3,
-}
-
-#[api()]
-#[derive(Debug, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// zpool list item
-pub struct ZpoolListItem {
-    /// zpool name
-    pub name: String,
-    /// Health
-    pub health: String,
-    /// Total size
-    pub size: u64,
-    /// Used size
-    pub alloc: u64,
-    /// Free space
-    pub free: u64,
-    /// ZFS fragmentation level
-    pub frag: u64,
-    /// ZFS deduplication ratio
-    pub dedup: f64,
-}
diff --git a/pbs-api-types/tests/group_filter_tests.rs b/pbs-api-types/tests/group_filter_tests.rs
deleted file mode 100644
index 89a7ddd1..00000000
--- a/pbs-api-types/tests/group_filter_tests.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
-use std::str::FromStr;
-
-#[test]
-fn test_no_filters() {
-    let group_filters = vec![];
-
-    let do_backup = [
-        "vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
-    ];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
-
-#[test]
-fn test_include_filters() {
-    let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];
-
-    let do_backup = [
-        "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
-    ];
-
-    let dont_backup = ["vm/101", "vm/109"];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-
-    for id in dont_backup {
-        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
-
-#[test]
-fn test_exclude_filters() {
-    let group_filters = [
-        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
-        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
-    ];
-
-    let do_backup = ["vm/104", "vm/108", "vm/109"];
-
-    let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-    for id in dont_backup {
-        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
-
-#[test]
-fn test_include_and_exclude_filters() {
-    let group_filters = [
-        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
-        GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
-        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
-    ];
-
-    let do_backup = ["vm/104", "vm/108"];
-
-    let dont_backup = [
-        "vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
-    ];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-
-    for id in dont_backup {
-        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
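Example (not part of the patch): the tests above encode the filter precedence — with no filters everything is backed up, exclude filters always win, and once at least one include filter exists a group must match an include and no exclude. A minimal standalone sketch of that rule, mirroring the deleted tests:

use std::str::FromStr;

use pbs_api_types::{BackupGroup, BackupType, GroupFilter};

fn main() {
    let filters = [
        GroupFilter::from_str("regex:.*10[1-5]").unwrap(),
        GroupFilter::from_str("exclude:regex:.*103").unwrap(),
    ];
    // Included by the first filter and not excluded by the second.
    assert!(BackupGroup::new(BackupType::Vm, "vm/101").apply_filters(&filters));
    // Matches an exclude filter, so it is always dropped.
    assert!(!BackupGroup::new(BackupType::Vm, "vm/103").apply_filters(&filters));
    // Matches no include filter.
    assert!(!BackupGroup::new(BackupType::Vm, "vm/109").apply_filters(&filters));
}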