Merge remote-tracking branch 'prepare-pbs-api-types-for-move/prepare-pbs-api-types-for-move'
Move over the whole history of pbs-api-types from the proxmox-backup git repo to the common workspace, as we want to re-use it in PDM and the yew UI components and thus require a real source-code package for this crate.

Choose this repo over proxmox-api-types to avoid having to copy this build system over there; rather, we want to also merge pve-api-types from that repo into ours here in the future.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
commit b7b00b5205
pbs-api-types/Cargo.toml (new file, 24 lines)
@@ -0,0 +1,24 @@
[package]
name = "pbs-api-types"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
description = "general API type helpers for PBS"

[dependencies]
anyhow.workspace = true
const_format.workspace = true
hex.workspace = true
percent-encoding.workspace = true
regex.workspace = true
serde.workspace = true
serde_plain.workspace = true

proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
proxmox-apt-api-types.workspace = true
proxmox-human-byte.workspace = true
proxmox-lang.workspace = true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde.workspace = true
proxmox-time.workspace = true
proxmox-uuid = { workspace = true, features = [ "serde" ] }
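Once the crate lives in this workspace, downstream consumers such as PDM or the yew UI components can depend on it like any other workspace member. A minimal, purely illustrative sketch, assuming `pbs-api-types` is declared as a dependency and that the modules below are re-exported at the crate root:

// Hypothetical consumer code: pull in one of the types added below.
use pbs_api_types::CryptMode;

fn describe(mode: CryptMode) -> &'static str {
    match mode {
        CryptMode::None => "not encrypted",
        CryptMode::Encrypt => "encrypted",
        CryptMode::SignOnly => "signed only",
    }
}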
pbs-api-types/src/acl.rs (new file, 332 lines)
@@ -0,0 +1,332 @@
use std::str::FromStr;

use const_format::concatcp;
use serde::de::{value, IntoDeserializer};
use serde::{Deserialize, Serialize};

use proxmox_lang::constnamedbitmap;
use proxmox_schema::{
    api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
};

use crate::PROXMOX_SAFE_ID_REGEX_STR;

const_regex! {
    pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$");
}

// define Privilege bitfield

constnamedbitmap! {
    /// Contains a list of privilege name to privilege value mappings.
    ///
    /// The names are used when displaying/persisting privileges anywhere, the values are used to
    /// allow easy matching of privileges as bitflags.
    PRIVILEGES: u64 => {
        /// Sys.Audit allows knowing about the system and its status
        PRIV_SYS_AUDIT("Sys.Audit");
        /// Sys.Modify allows modifying system-level configuration
        PRIV_SYS_MODIFY("Sys.Modify");
        /// Sys.PowerManagement allows to power off/reboot/... the system
        PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");

        /// Datastore.Audit allows knowing about a datastore,
        /// including reading the configuration entry and listing its contents
        PRIV_DATASTORE_AUDIT("Datastore.Audit");
        /// Datastore.Allocate allows creating or deleting datastores
        PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
        /// Datastore.Modify allows modifying a datastore and its contents
        PRIV_DATASTORE_MODIFY("Datastore.Modify");
        /// Datastore.Read allows reading arbitrary backup contents
        PRIV_DATASTORE_READ("Datastore.Read");
        /// Allows verifying a datastore
        PRIV_DATASTORE_VERIFY("Datastore.Verify");

        /// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
        /// but also requires backup ownership
        PRIV_DATASTORE_BACKUP("Datastore.Backup");
        /// Datastore.Prune allows deleting snapshots,
        /// but also requires backup ownership
        PRIV_DATASTORE_PRUNE("Datastore.Prune");

        /// Permissions.Modify allows modifying ACLs
        PRIV_PERMISSIONS_MODIFY("Permissions.Modify");

        /// Remote.Audit allows reading remote.cfg and sync.cfg entries
        PRIV_REMOTE_AUDIT("Remote.Audit");
        /// Remote.Modify allows modifying remote.cfg
        PRIV_REMOTE_MODIFY("Remote.Modify");
        /// Remote.Read allows reading data from a configured `Remote`
        PRIV_REMOTE_READ("Remote.Read");
        /// Remote.DatastoreBackup allows creating new snapshots on remote datastores
        PRIV_REMOTE_DATASTORE_BACKUP("Remote.DatastoreBackup");
        /// Remote.DatastoreModify allows to modify remote datastores
        PRIV_REMOTE_DATASTORE_MODIFY("Remote.DatastoreModify");
        /// Remote.DatastorePrune allows deleting snapshots on remote datastores
        PRIV_REMOTE_DATASTORE_PRUNE("Remote.DatastorePrune");

        /// Sys.Console allows access to the system's console
        PRIV_SYS_CONSOLE("Sys.Console");

        /// Tape.Audit allows reading tape backup configuration and status
        PRIV_TAPE_AUDIT("Tape.Audit");
        /// Tape.Modify allows modifying tape backup configuration
        PRIV_TAPE_MODIFY("Tape.Modify");
        /// Tape.Write allows writing tape media
        PRIV_TAPE_WRITE("Tape.Write");
        /// Tape.Read allows reading tape backup configuration and media contents
        PRIV_TAPE_READ("Tape.Read");

        /// Realm.Allocate allows viewing, creating, modifying and deleting realms
        PRIV_REALM_ALLOCATE("Realm.Allocate");
    }
}

pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
    PRIVILEGES
        .iter()
        .fold(Vec::new(), |mut priv_names, (name, value)| {
            if value & privs != 0 {
                priv_names.push(name);
            }
            priv_names
        })
}

/// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the `root@pam` superuser
pub const ROLE_ADMIN: u64 = u64::MAX;

/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NO_ACCESS: u64 = 0;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Audit can view configuration and status information, but not modify it.
pub const ROLE_AUDIT: u64 = 0
    | PRIV_SYS_AUDIT
    | PRIV_DATASTORE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Admin can do anything on the datastore.
pub const ROLE_DATASTORE_ADMIN: u64 = 0
    | PRIV_DATASTORE_AUDIT
    | PRIV_DATASTORE_MODIFY
    | PRIV_DATASTORE_READ
    | PRIV_DATASTORE_VERIFY
    | PRIV_DATASTORE_BACKUP
    | PRIV_DATASTORE_PRUNE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Reader can read/verify datastore content and do restore
pub const ROLE_DATASTORE_READER: u64 = 0
    | PRIV_DATASTORE_AUDIT
    | PRIV_DATASTORE_VERIFY
    | PRIV_DATASTORE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Backup can do backup and restore, but no prune.
pub const ROLE_DATASTORE_BACKUP: u64 = 0
    | PRIV_DATASTORE_BACKUP;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.PowerUser can do backup, restore, and prune.
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
    | PRIV_DATASTORE_PRUNE
    | PRIV_DATASTORE_BACKUP;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Audit can audit the datastore.
pub const ROLE_DATASTORE_AUDIT: u64 = 0
    | PRIV_DATASTORE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Audit can audit the remote
pub const ROLE_REMOTE_AUDIT: u64 = 0
    | PRIV_REMOTE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Admin can do anything on the remote.
pub const ROLE_REMOTE_ADMIN: u64 = 0
    | PRIV_REMOTE_AUDIT
    | PRIV_REMOTE_MODIFY
    | PRIV_REMOTE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncOperator can do read and prune on the remote.
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
    | PRIV_REMOTE_AUDIT
    | PRIV_REMOTE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncPushOperator can read and push snapshots to the remote.
pub const ROLE_REMOTE_SYNC_PUSH_OPERATOR: u64 = 0
    | PRIV_REMOTE_AUDIT
    | PRIV_REMOTE_DATASTORE_BACKUP;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.DatastorePowerUser can read and push snapshots to the remote, and prune owned snapshots
/// and groups but not create or remove namespaces.
pub const ROLE_REMOTE_DATASTORE_POWERUSER: u64 = 0
    | PRIV_REMOTE_AUDIT
    | PRIV_REMOTE_DATASTORE_BACKUP
    | PRIV_REMOTE_DATASTORE_PRUNE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.DatastoreAdmin can read and push snapshots to the remote, prune owned snapshots
/// and groups, as well as create or remove namespaces.
pub const ROLE_REMOTE_DATASTORE_ADMIN: u64 = 0
    | PRIV_REMOTE_AUDIT
    | PRIV_REMOTE_DATASTORE_BACKUP
    | PRIV_REMOTE_DATASTORE_MODIFY
    | PRIV_REMOTE_DATASTORE_PRUNE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Audit can audit the tape backup configuration and media content
pub const ROLE_TAPE_AUDIT: u64 = 0
    | PRIV_TAPE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Admin can do anything on the tape backup
pub const ROLE_TAPE_ADMIN: u64 = 0
    | PRIV_TAPE_AUDIT
    | PRIV_TAPE_MODIFY
    | PRIV_TAPE_READ
    | PRIV_TAPE_WRITE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Operator can do tape backup and restore (but no configuration changes)
pub const ROLE_TAPE_OPERATOR: u64 = 0
    | PRIV_TAPE_AUDIT
    | PRIV_TAPE_READ
    | PRIV_TAPE_WRITE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Reader can read and inspect tape content
pub const ROLE_TAPE_READER: u64 = 0
    | PRIV_TAPE_AUDIT
    | PRIV_TAPE_READ;

/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";

#[api(
    type_text: "<role>",
)]
#[repr(u64)]
#[derive(Serialize, Deserialize)]
/// Enum representing roles via their [PRIVILEGES] combination.
///
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
/// single, unique `u64` value that is used in this enum definition.
pub enum Role {
    /// Administrator
    Admin = ROLE_ADMIN,
    /// Auditor
    Audit = ROLE_AUDIT,
    /// Disable Access
    NoAccess = ROLE_NO_ACCESS,
    /// Datastore Administrator
    DatastoreAdmin = ROLE_DATASTORE_ADMIN,
    /// Datastore Reader (inspect datastore content and do restores)
    DatastoreReader = ROLE_DATASTORE_READER,
    /// Datastore Backup (backup and restore owned backups)
    DatastoreBackup = ROLE_DATASTORE_BACKUP,
    /// Datastore PowerUser (backup, restore and prune owned backup)
    DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
    /// Datastore Auditor
    DatastoreAudit = ROLE_DATASTORE_AUDIT,
    /// Remote Auditor
    RemoteAudit = ROLE_REMOTE_AUDIT,
    /// Remote Administrator
    RemoteAdmin = ROLE_REMOTE_ADMIN,
    /// Synchronization Operator
    RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
    /// Synchronization Operator (push direction)
    RemoteSyncPushOperator = ROLE_REMOTE_SYNC_PUSH_OPERATOR,
    /// Remote Datastore PowerUser
    RemoteDatastorePowerUser = ROLE_REMOTE_DATASTORE_POWERUSER,
    /// Remote Datastore Admin
    RemoteDatastoreAdmin = ROLE_REMOTE_DATASTORE_ADMIN,
    /// Tape Auditor
    TapeAudit = ROLE_TAPE_AUDIT,
    /// Tape Administrator
    TapeAdmin = ROLE_TAPE_ADMIN,
    /// Tape Operator
    TapeOperator = ROLE_TAPE_OPERATOR,
    /// Tape Reader
    TapeReader = ROLE_TAPE_READER,
}

impl FromStr for Role {
    type Err = value::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::deserialize(s.into_deserializer())
    }
}

pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);

pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
    .format(&ACL_PATH_FORMAT)
    .min_length(1)
    .max_length(128)
    .schema();

pub const ACL_PROPAGATE_SCHEMA: Schema =
    BooleanSchema::new("Allow to propagate (inherit) permissions.")
        .default(true)
        .schema();

pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("user", "User"),
        EnumEntry::new("group", "Group"),
    ]))
    .schema();

#[api(
    properties: {
        propagate: {
            schema: ACL_PROPAGATE_SCHEMA,
        },
        path: {
            schema: ACL_PATH_SCHEMA,
        },
        ugid_type: {
            schema: ACL_UGID_TYPE_SCHEMA,
        },
        ugid: {
            type: String,
            description: "User or Group ID.",
        },
        roleid: {
            type: Role,
        }
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ACL list entry.
pub struct AclListItem {
    pub path: String,
    pub ugid: String,
    pub ugid_type: String,
    pub propagate: bool,
    pub roleid: String,
}
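A short sketch of how the pieces above fit together, assuming these items are re-exported at the crate root; the assertions follow directly from the constants defined above:

use std::str::FromStr;

use pbs_api_types::{privs_to_priv_names, Role, ROLE_AUDIT, ROLE_DATASTORE_POWERUSER};

fn main() {
    // Roles are plain privilege bitmaps, so they decompose back into privilege names.
    assert_eq!(
        privs_to_priv_names(ROLE_AUDIT),
        vec!["Sys.Audit", "Datastore.Audit"]
    );

    // Role's FromStr goes through serde, so the variant identifier is the role name,
    // and the #[repr(u64)] discriminant is the privilege bitmap itself.
    let role = Role::from_str("DatastorePowerUser").expect("known role name");
    assert_eq!(role as u64, ROLE_DATASTORE_POWERUSER);
}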
pbs-api-types/src/ad.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, Updater};

use super::{
    LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
    SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA,
};

#[api(
    properties: {
        "realm": {
            schema: REALM_ID_SCHEMA,
        },
        "comment": {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "verify": {
            optional: true,
            default: false,
        },
        "sync-defaults-options": {
            schema: SYNC_DEFAULTS_STRING_SCHEMA,
            optional: true,
        },
        "sync-attributes": {
            schema: SYNC_ATTRIBUTES_SCHEMA,
            optional: true,
        },
        "user-classes": {
            optional: true,
            schema: USER_CLASSES_SCHEMA,
        },
        "base-dn": {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        },
        "bind-dn": {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// AD realm configuration properties.
pub struct AdRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// AD server address
    pub server1: String,
    /// Fallback AD server address
    #[serde(skip_serializing_if = "Option::is_none")]
    pub server2: Option<String>,
    /// AD server Port
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    /// Base domain name. Users are searched under this domain using a `subtree search`.
    /// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be
    /// overridden if the need arises.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub base_dn: Option<String>,
    /// Comment
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Connection security
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<LdapMode>,
    /// Verify server certificate
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify: Option<bool>,
    /// CA certificate to use for the server. The path can point to
    /// either a file, or a directory. If it points to a file,
    /// the PEM-formatted X.509 certificate stored at the path
    /// will be added as a trusted certificate.
    /// If the path points to a directory,
    /// the directory replaces the system's default certificate
    /// store at `/etc/ssl/certs` - Every file in the directory
    /// will be loaded as a trusted certificate.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub capath: Option<String>,
    /// Bind domain to use for looking up users
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bind_dn: Option<String>,
    /// Custom LDAP search filter for user sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<String>,
    /// Default options for AD sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_defaults_options: Option<String>,
    /// List of LDAP attributes to sync from AD to user config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_attributes: Option<String>,
    /// User ``objectClass`` classes to sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_classes: Option<String>,
}
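Because the struct uses `rename_all = "kebab-case"` and plain `Option` fields, a realm entry deserializes from kebab-case keys, with missing optional settings becoming `None`. A small sketch using `serde_json` (not a dependency of this crate, purely for illustration):

use pbs_api_types::AdRealmConfig;

fn main() {
    // Only the two mandatory fields plus one optional kebab-case key.
    let config: AdRealmConfig = serde_json::from_str(
        r#"{"realm": "company", "server1": "ad.company.example", "base-dn": "DC=company,DC=example"}"#,
    )
    .expect("valid AD realm config");

    assert_eq!(config.realm, "company");
    assert_eq!(config.base_dn.as_deref(), Some("DC=company,DC=example"));
    assert!(config.server2.is_none()); // absent Option fields become None
}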
pbs-api-types/src/crypto.rs (new file, 95 lines)
@@ -0,0 +1,95 @@
use std::fmt::{self, Display};

use anyhow::Error;
use serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
    /// Don't encrypt.
    None,
    /// Encrypt.
    Encrypt,
    /// Only sign.
    SignOnly,
}

#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
    #[serde(with = "bytes_as_fingerprint")]
    bytes: [u8; 32],
}

impl Fingerprint {
    pub fn new(bytes: [u8; 32]) -> Self {
        Self { bytes }
    }
    pub fn bytes(&self) -> &[u8; 32] {
        &self.bytes
    }
    pub fn signature(&self) -> String {
        as_fingerprint(&self.bytes)
    }
}

/// Display as short key ID
impl Display for Fingerprint {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
    }
}

impl std::str::FromStr for Fingerprint {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Error> {
        let mut tmp = s.to_string();
        tmp.retain(|c| c != ':');
        let mut bytes = [0u8; 32];
        hex::decode_to_slice(&tmp, &mut bytes)?;
        Ok(Fingerprint::new(bytes))
    }
}

fn as_fingerprint(bytes: &[u8]) -> String {
    hex::encode(bytes)
        .as_bytes()
        .chunks(2)
        .map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
        .collect::<Vec<&str>>()
        .join(":")
}

pub mod bytes_as_fingerprint {
    use std::mem::MaybeUninit;

    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let s = super::as_fingerprint(bytes);
        serializer.serialize_str(&s)
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
    where
        D: Deserializer<'de>,
    {
        // TODO: more efficiently implement with a Visitor implementing visit_str using split() and
        // hex::decode by-byte
        let mut s = String::deserialize(deserializer)?;
        s.retain(|c| c != ':');
        let mut out = MaybeUninit::<[u8; 32]>::uninit();
        hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
            .map_err(serde::de::Error::custom)?;
        Ok(unsafe { out.assume_init() })
    }
}
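The `FromStr`/`Display` pair is deliberately asymmetric: parsing accepts the full 64 hex digits, with or without colon separators, while `Display` prints only the short key ID built from the first 8 bytes. A minimal sketch, assuming `Fingerprint` is re-exported at the crate root:

use std::str::FromStr;

use pbs_api_types::Fingerprint;

fn main() {
    // FromStr strips colons, so both separated and bare hex forms parse.
    let hex64 = "00".repeat(31) + "ff"; // 32 bytes -> 64 hex characters
    let fp = Fingerprint::from_str(&hex64).expect("valid fingerprint");

    // Display intentionally shows only the first 8 bytes as a short key ID.
    assert_eq!(fp.to_string(), "00:00:00:00:00:00:00:00");
    assert_eq!(fp.bytes()[31], 0xff);
}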
pbs-api-types/src/datastore.rs (new file, 1971 lines)
(diff suppressed because it is too large)
pbs-api-types/src/file_restore.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// General status information about a running VM file-restore daemon
pub struct RestoreDaemonStatus {
    /// VM uptime in seconds
    pub uptime: i64,
    /// Time left until auto-shutdown. Keep in mind that this is useless when 'keep-timeout' is
    /// not set, as then the status call will have reset the timer before returning the value.
    pub timeout: i64,
}

#[api]
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// The desired format of the result.
pub enum FileRestoreFormat {
    /// Plain file (only works for single files)
    Plain,
    /// PXAR archive
    Pxar,
    /// ZIP archive
    Zip,
    /// TAR archive
    Tar,
}
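As with the other enums here, `rename_all = "kebab-case"` determines the wire format. A tiny round-trip sketch (again, `serde_json` is only used for illustration):

use pbs_api_types::FileRestoreFormat;

fn main() {
    // kebab-case lowercases the variant names on the wire: "plain", "pxar", "zip", "tar".
    let format: FileRestoreFormat =
        serde_json::from_str(r#""pxar""#).expect("known restore format");
    assert!(matches!(format, FileRestoreFormat::Pxar));
    assert_eq!(serde_json::to_string(&format).unwrap(), r#""pxar""#);
}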
pbs-api-types/src/jobs.rs (new file, 844 lines)
@@ -0,0 +1,844 @@
use std::str::FromStr;

use anyhow::bail;
use const_format::concatcp;
use regex::Regex;
use serde::{Deserialize, Serialize};

use proxmox_schema::*;

use crate::{
    Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
    BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
    DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
};

const_regex! {

    /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
    pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
    /// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
    pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
}

pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(
        proxmox_time::verify_calendar_event,
    ))
    .type_text("<calendar-event>")
    .schema();

pub const GC_SCHEDULE_SCHEMA: Schema =
    StringSchema::new("Run garbage collection job at specified schedule.")
        .format(&ApiStringFormat::VerifyFn(
            proxmox_time::verify_calendar_event,
        ))
        .type_text("<calendar-event>")
        .schema();

pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(
        proxmox_time::verify_calendar_event,
    ))
    .type_text("<calendar-event>")
    .schema();

pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
    StringSchema::new("Run verify job at specified schedule.")
        .format(&ApiStringFormat::VerifyFn(
            proxmox_time::verify_calendar_event,
        ))
        .type_text("<calendar-event>")
        .schema();

pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
    "Delete vanished backups. This removes the local copy if the remote backup was deleted.",
)
.default(false)
.schema();

#[api(
    properties: {
        "next-run": {
            description: "Estimated time of the next run (UNIX epoch).",
            optional: true,
            type: Integer,
        },
        "last-run-state": {
            description: "Result of the last run.",
            optional: true,
            type: String,
        },
        "last-run-upid": {
            description: "Task UPID of the last run.",
            optional: true,
            type: String,
        },
        "last-run-endtime": {
            description: "Endtime of the last run.",
            optional: true,
            type: Integer,
        },
    }
)]
#[derive(Serialize, Deserialize, Default, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Job Scheduling Status
pub struct JobScheduleStatus {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_run: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_run_state: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_run_upid: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_run_endtime: Option<i64>,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
pub enum Notify {
    /// Never send notification
    Never,
    /// Send notifications for failed and successful jobs
    Always,
    /// Send notifications for failed jobs only
    Error,
}

#[api(
    properties: {
        gc: {
            type: Notify,
            optional: true,
        },
        verify: {
            type: Notify,
            optional: true,
        },
        sync: {
            type: Notify,
            optional: true,
        },
        prune: {
            type: Notify,
            optional: true,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
pub struct DatastoreNotify {
    /// Garbage collection settings
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc: Option<Notify>,
    /// Verify job setting
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify: Option<Notify>,
    /// Sync job setting
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync: Option<Notify>,
    /// Prune job setting
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prune: Option<Notify>,
}

pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
    "Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
)
.format(&ApiStringFormat::PropertyString(
    &DatastoreNotify::API_SCHEMA,
))
.schema();

pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
    "Do not verify backups that are already verified if their verification is not outdated.",
)
.default(true)
.schema();

pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
    IntegerSchema::new("Days after which a verification becomes outdated (0 is deprecated).")
        .minimum(0)
        .schema();

#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        "ignore-verified": {
            optional: true,
            schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
        },
        "outdated-after": {
            optional: true,
            schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: VERIFICATION_SCHEDULE_SCHEMA,
        },
        ns: {
            optional: true,
            schema: BACKUP_NAMESPACE_SCHEMA,
        },
        "max-depth": {
            optional: true,
            schema: crate::NS_MAX_DEPTH_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
    /// unique ID to address this job
    #[updater(skip)]
    pub id: String,
    /// the datastore ID this verification job affects
    pub store: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// if not set to false, check the age of the last snapshot verification to filter
    /// out recent ones, depending on 'outdated_after' configuration.
    pub ignore_verified: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
    pub outdated_after: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// when to schedule this job in calendar event notation
    pub schedule: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    /// on which backup namespace to run the verification recursively
    pub ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    /// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
    /// snapshots on the same level as the passed `ns`, or the datastore root if none.
    pub max_depth: Option<usize>,
}

impl VerificationJobConfig {
    pub fn acl_path(&self) -> Vec<&str> {
        match self.ns.as_ref() {
            Some(ns) => ns.acl_path(&self.store),
            None => vec!["datastore", &self.store],
        }
    }
}

#[api(
    properties: {
        config: {
            type: VerificationJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Verification Job
pub struct VerificationJobStatus {
    #[serde(flatten)]
    pub config: VerificationJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}

#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        pool: {
            schema: MEDIA_POOL_NAME_SCHEMA,
        },
        drive: {
            schema: DRIVE_NAME_SCHEMA,
        },
        "eject-media": {
            description: "Eject media upon job completion.",
            type: bool,
            optional: true,
        },
        "export-media-set": {
            description: "Export media set upon job completion.",
            type: bool,
            optional: true,
        },
        "latest-only": {
            description: "Backup latest snapshots only.",
            type: bool,
            optional: true,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "group-filter": {
            schema: GROUP_FILTER_LIST_SCHEMA,
            optional: true,
        },
        ns: {
            type: BackupNamespace,
            optional: true,
        },
        "max-depth": {
            schema: crate::NS_MAX_DEPTH_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
    pub store: String,
    pub pool: String,
    pub drive: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eject_media: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub export_media_set: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_only: Option<bool>,
    /// Send job email notification to this user
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify_user: Option<Userid>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notification_mode: Option<NotificationMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group_filter: Option<Vec<GroupFilter>>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub max_depth: Option<usize>,
}

#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        setup: {
            type: TapeBackupJobSetup,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
    #[updater(skip)]
    pub id: String,
    #[serde(flatten)]
    pub setup: TapeBackupJobSetup,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schedule: Option<String>,
}

#[api(
    properties: {
        config: {
            type: TapeBackupJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Tape Backup Job
pub struct TapeBackupJobStatus {
    #[serde(flatten)]
    pub config: TapeBackupJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
    /// Next tape used (best guess)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_media_label: Option<String>,
}

#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum FilterType {
    /// BackupGroup type - either `vm`, `ct`, or `host`.
    BackupType(BackupType),
    /// Full identifier of BackupGroup, including type
    Group(String),
    /// A regular expression matched against the full identifier of the BackupGroup
    Regex(Regex),
}

impl PartialEq for FilterType {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Self::BackupType(a), Self::BackupType(b)) => a == b,
            (Self::Group(a), Self::Group(b)) => a == b,
            (Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(),
            _ => false,
        }
    }
}

impl std::str::FromStr for FilterType {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s.split_once(':') {
            Some(("group", value)) => BACKUP_GROUP_SCHEMA
                .parse_simple_value(value)
                .map(|_| FilterType::Group(value.to_string()))?,
            Some(("type", value)) => FilterType::BackupType(value.parse()?),
            Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
            Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
            None => bail!("input doesn't match expected format '<group:GROUP|type:<vm|ct|host>|regex:REGEX>'"),
        })
    }
}

// used for serializing below, caution!
impl std::fmt::Display for FilterType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
            FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
            FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
        }
    }
}

#[derive(Clone, Debug)]
pub struct GroupFilter {
    pub is_exclude: bool,
    pub filter_type: FilterType,
}

impl PartialEq for GroupFilter {
    fn eq(&self, other: &Self) -> bool {
        self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
    }
}

impl Eq for GroupFilter {}

impl std::str::FromStr for GroupFilter {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (is_exclude, type_str) = match s.split_once(':') {
            Some(("include", value)) => (false, value),
            Some(("exclude", value)) => (true, value),
            _ => (false, s),
        };

        Ok(GroupFilter {
            is_exclude,
            filter_type: type_str.parse()?,
        })
    }
}

// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.is_exclude {
            f.write_str("exclude:")?;
        }
        std::fmt::Display::fmt(&self.filter_type, f)
    }
}

proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
proxmox_serde::forward_serialize_to_display!(GroupFilter);

fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
    GroupFilter::from_str(input).map(|_| ())
}

pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
    "Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
    .format(&ApiStringFormat::VerifyFn(verify_group_filter))
    .type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
    .schema();

pub const GROUP_FILTER_LIST_SCHEMA: Schema =
    ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
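Putting `GroupFilter` and `FilterType` together: the optional `include:`/`exclude:` prefix is split off first, the remainder selects the filter type, and `Display` writes back the canonical form that `verify_group_filter` accepts. A sketch, assuming `BackupType` (from the suppressed datastore.rs diff) parses and prints the usual `vm`/`ct`/`host` spellings:

use std::str::FromStr;

use pbs_api_types::{FilterType, GroupFilter};

fn main() {
    // "exclude:"/"include:" toggles is_exclude; the rest picks the filter type.
    let filter = GroupFilter::from_str("exclude:type:vm").expect("valid group filter");
    assert!(filter.is_exclude);
    assert!(matches!(filter.filter_type, FilterType::BackupType(_)));

    // Display round-trips the canonical form, which the serde forwarding relies on.
    assert_eq!(filter.to_string(), "exclude:type:vm");
}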
pub const TRANSFER_LAST_SCHEMA: Schema =
    IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
        .minimum(1)
        .schema();

#[api()]
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Direction of the sync job, push or pull
pub enum SyncDirection {
    /// Sync direction pull
    #[default]
    Pull,
    /// Sync direction push
    Push,
}

impl std::fmt::Display for SyncDirection {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            SyncDirection::Pull => f.write_str("pull"),
            SyncDirection::Push => f.write_str("push"),
        }
    }
}

pub const RESYNC_CORRUPT_SCHEMA: Schema =
    BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.")
        .schema();

#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        ns: {
            type: BackupNamespace,
            optional: true,
        },
        "owner": {
            type: Authid,
            optional: true,
        },
        remote: {
            schema: REMOTE_ID_SCHEMA,
            optional: true,
        },
        "remote-store": {
            schema: DATASTORE_SCHEMA,
        },
        "remote-ns": {
            type: BackupNamespace,
            optional: true,
        },
        "remove-vanished": {
            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
            optional: true,
        },
        "max-depth": {
            schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        limit: {
            type: RateLimitConfig,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
        "group-filter": {
            schema: GROUP_FILTER_LIST_SCHEMA,
            optional: true,
        },
        "transfer-last": {
            schema: TRANSFER_LAST_SCHEMA,
            optional: true,
        },
        "resync-corrupt": {
            schema: RESYNC_CORRUPT_SCHEMA,
            optional: true,
        },
        "sync-direction": {
            type: SyncDirection,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Sync Job
pub struct SyncJobConfig {
    #[updater(skip)]
    pub id: String,
    pub store: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// None implies local sync.
    pub remote: Option<String>,
    pub remote_store: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub remote_ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub remove_vanished: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_depth: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schedule: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group_filter: Option<Vec<GroupFilter>>,
    #[serde(flatten)]
    pub limit: RateLimitConfig,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub transfer_last: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub resync_corrupt: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_direction: Option<SyncDirection>,
}

impl SyncJobConfig {
    pub fn acl_path(&self) -> Vec<&str> {
        match self.ns.as_ref() {
            Some(ns) => ns.acl_path(&self.store),
            None => vec!["datastore", &self.store],
        }
    }

    pub fn remote_acl_path(&self) -> Option<Vec<&str>> {
        let remote = self.remote.as_ref()?;
        match &self.remote_ns {
            Some(remote_ns) => Some(remote_ns.remote_acl_path(remote, &self.remote_store)),
            None => Some(vec!["remote", remote, &self.remote_store]),
        }
    }
}

#[api(
    properties: {
        config: {
            type: SyncJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Sync Job
pub struct SyncJobStatus {
    #[serde(flatten)]
    pub config: SyncJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}

/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API
/// call to prune a specific group, where `max-depth` makes no sense.
#[api(
    properties: {
        "keep-last": {
            schema: crate::PRUNE_SCHEMA_KEEP_LAST,
            optional: true,
        },
        "keep-hourly": {
            schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
            optional: true,
        },
        "keep-daily": {
            schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
            optional: true,
        },
        "keep-weekly": {
            schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
            optional: true,
        },
        "keep-monthly": {
            schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
            optional: true,
        },
        "keep-yearly": {
            schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_yearly: Option<u64>,
}

impl KeepOptions {
    pub fn keeps_something(&self) -> bool {
        self.keep_last.unwrap_or(0)
            + self.keep_hourly.unwrap_or(0)
            + self.keep_daily.unwrap_or(0)
            + self.keep_weekly.unwrap_or(0)
            + self.keep_monthly.unwrap_or(0)
            + self.keep_yearly.unwrap_or(0)
            > 0
    }
}

#[api(
    properties: {
        keep: {
            type: KeepOptions,
        },
        ns: {
            type: BackupNamespace,
            optional: true,
        },
        "max-depth": {
            schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
    #[serde(flatten)]
    pub keep: KeepOptions,

    /// The (optional) recursion depth
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_depth: Option<usize>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub ns: Option<BackupNamespace>,
}

impl PruneJobOptions {
    pub fn keeps_something(&self) -> bool {
        self.keep.keeps_something()
    }

    pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        match &self.ns {
            Some(ns) => ns.acl_path(store),
            None => vec!["datastore", store],
        }
    }
}

#[api(
    properties: {
        disable: {
            type: Boolean,
            optional: true,
            default: false,
        },
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        schedule: {
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        options: {
            type: PruneJobOptions,
        },
    },
)]
#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
    /// unique ID to address this job
    #[updater(skip)]
    pub id: String,

    pub store: String,

    /// Disable this job.
    #[serde(default, skip_serializing_if = "is_false")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    pub disable: bool,

    pub schedule: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    #[serde(flatten)]
    pub options: PruneJobOptions,
}

impl PruneJobConfig {
    pub fn acl_path(&self) -> Vec<&str> {
        self.options.acl_path(&self.store)
    }
}

fn is_false(b: &bool) -> bool {
    !b
}

#[api(
    properties: {
        config: {
            type: PruneJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {
    #[serde(flatten)]
    pub config: PruneJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}
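`keeps_something` is the guard callers can use to reject prune jobs that would keep nothing, and `acl_path` falls back to the datastore root when no namespace is set. A brief sketch, assuming these types are re-exported at the crate root:

use pbs_api_types::PruneJobOptions;

fn main() {
    // An all-None KeepOptions keeps nothing, which callers use to reject no-op prune jobs.
    let mut options = PruneJobOptions::default();
    assert!(!options.keeps_something());

    options.keep.keep_daily = Some(7);
    options.keep.keep_weekly = Some(4);
    assert!(options.keeps_something());

    // Without a namespace, the ACL check falls back to the datastore root path.
    assert_eq!(options.acl_path("store1"), vec!["datastore", "store1"]);
}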
pbs-api-types/src/key_derivation.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::api;

use crate::CERT_FINGERPRINT_SHA256_SCHEMA;

#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,
    /// Encrypt the key with a password using scrypt.
    Scrypt,
    /// Encrypt the key with a password using PBKDF2.
    PBKDF2,
}

impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        Kdf::Scrypt
    }
}

#[api(
    properties: {
        kdf: {
            type: Kdf,
        },
        fingerprint: {
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
pub struct KeyInfo {
    /// Path to key (if stored in a file)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    pub kdf: Kdf,
    /// Key creation time
    pub created: i64,
    /// Key modification time
    pub modified: i64,
    /// Key fingerprint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<String>,
    /// Password hint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hint: Option<String>,
}
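`Kdf::default()` and the `#[api(default: "scrypt")]` annotation agree on scrypt, and the lowercase rename governs the wire form. A sketch using `serde_plain` (which is in the dependency list above; a consumer would need it too):

use pbs_api_types::Kdf;

fn main() {
    // Scrypt is both the Default impl and the schema default.
    assert!(matches!(Kdf::default(), Kdf::Scrypt));

    // rename_all = "lowercase": variants travel as "none", "scrypt", "pbkdf2".
    let kdf: Kdf = serde_plain::from_str("pbkdf2").expect("known kdf");
    assert!(matches!(kdf, Kdf::PBKDF2));
}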
pbs-api-types/src/ldap.rs (new file, 208 lines)
@@ -0,0 +1,208 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};
|
||||
|
||||
use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
|
||||
|
||||
#[api()]
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
/// LDAP connection type
|
||||
pub enum LdapMode {
|
||||
/// Plaintext LDAP connection
|
||||
#[serde(rename = "ldap")]
|
||||
#[default]
|
||||
Ldap,
|
||||
/// Secure STARTTLS connection
|
||||
#[serde(rename = "ldap+starttls")]
|
||||
StartTls,
|
||||
/// Secure LDAPS connection
|
||||
#[serde(rename = "ldaps")]
|
||||
Ldaps,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"realm": {
|
||||
schema: REALM_ID_SCHEMA,
|
||||
},
|
||||
"comment": {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
"verify": {
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"sync-defaults-options": {
|
||||
schema: SYNC_DEFAULTS_STRING_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"sync-attributes": {
|
||||
schema: SYNC_ATTRIBUTES_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"user-classes" : {
|
||||
optional: true,
|
||||
schema: USER_CLASSES_SCHEMA,
|
||||
},
|
||||
"base-dn" : {
|
||||
schema: LDAP_DOMAIN_SCHEMA,
|
||||
},
|
||||
"bind-dn" : {
|
||||
schema: LDAP_DOMAIN_SCHEMA,
|
||||
optional: true,
|
||||
}
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, Clone)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// LDAP configuration properties.
|
||||
pub struct LdapRealmConfig {
|
||||
#[updater(skip)]
|
||||
pub realm: String,
|
||||
/// LDAP server address
|
||||
pub server1: String,
|
||||
/// Fallback LDAP server address
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub server2: Option<String>,
|
||||
/// Port
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub port: Option<u16>,
|
||||
/// Base domain name. Users are searched under this domain using a `subtree search`.
|
||||
pub base_dn: String,
|
||||
/// Username attribute. Used to map a ``userid`` to LDAP to an LDAP ``dn``.
|
||||
pub user_attr: String,
|
||||
/// Comment
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
/// Connection security
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mode: Option<LdapMode>,
|
||||
/// Verify server certificate
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub verify: Option<bool>,
|
||||
/// CA certificate to use for the server. The path can point to
|
||||
/// either a file, or a directory. If it points to a file,
|
||||
/// the PEM-formatted X.509 certificate stored at the path
|
||||
/// will be added as a trusted certificate.
|
||||
/// If the path points to a directory,
|
||||
/// the directory replaces the system's default certificate
|
||||
/// store at `/etc/ssl/certs` - Every file in the directory
|
||||
/// will be loaded as a trusted certificate.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub capath: Option<String>,
|
||||
/// Bind domain to use for looking up users
|
||||
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bind_dn: Option<String>,
    /// Custom LDAP search filter for user sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<String>,
    /// Default options for LDAP sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_defaults_options: Option<String>,
    /// List of attributes to sync from LDAP to user config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_attributes: Option<String>,
    /// User ``objectClass`` values to sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_classes: Option<String>,
}

#[api(
    properties: {
        "remove-vanished": {
            optional: true,
            schema: REMOVE_VANISHED_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Default options for LDAP synchronization runs
pub struct SyncDefaultsOptions {
    /// How to handle vanished properties/users
    pub remove_vanished: Option<String>,
    /// Enable new users after sync
    pub enable_new: Option<bool>,
}

#[api()]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// remove-vanished options
pub enum RemoveVanished {
    /// Delete ACLs for vanished users
    Acl,
    /// Remove vanished users
    Entry,
    /// Remove vanished properties from users (e.g. email)
    Properties,
}

pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema();

pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options")
    .format(&ApiStringFormat::PropertyString(
        &SyncDefaultsOptions::API_SCHEMA,
    ))
    .schema();

const REMOVE_VANISHED_DESCRIPTION: &str =
    "A semicolon-separated list of things to remove when they or the user \
    vanishes during user synchronization. The following values are possible: ``entry`` removes the \
    user when not returned from the sync; ``properties`` removes any \
    properties of an existing user that do not appear in the source. \
    ``acl`` removes ACLs when the user is not returned from the sync.";

pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION)
    .format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY))
    .schema();

pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new(
    "Array of remove-vanished options",
    &RemoveVanished::API_SCHEMA,
)
.min_length(1)
.schema();

#[api()]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Determine which LDAP attributes should be synced to which user attributes
pub struct SyncAttributes {
    /// Name of the LDAP attribute containing the user's email address
    pub email: Option<String>,
    /// Name of the LDAP attribute containing the user's first name
    pub firstname: Option<String>,
    /// Name of the LDAP attribute containing the user's last name
    pub lastname: Option<String>,
}

const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \
    which LDAP attributes map to which PBS user field. For example, \
    to map the LDAP attribute ``mail`` to PBS's ``email``, write \
    ``email=mail``.";

pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT)
    .format(&ApiStringFormat::PropertyString(
        &SyncAttributes::API_SCHEMA,
    ))
    .schema();

pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new(
    "Array of user classes",
    &StringSchema::new("user class").schema(),
)
.min_length(1)
.schema();

const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \
    user synchronization. For instance, if ``user-classes`` is set to ``person,user``, \
    then user synchronization will consider all LDAP entities \
    where ``objectClass: person`` or ``objectClass: user`` is set.";

pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT)
    .format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY))
    .default("inetorgperson,posixaccount,person,user")
    .schema();
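
#[cfg(test)]
mod remove_vanished_tests {
    use super::*;

    // A minimal sketch, using the crate's existing `serde_plain` dependency: each
    // element of the semicolon-separated `remove-vanished` property string
    // deserializes to one `RemoveVanished` variant via its kebab-case name.
    #[test]
    fn remove_vanished_variants_parse_from_plain_strings() {
        for (raw, expected) in [
            ("acl", RemoveVanished::Acl),
            ("entry", RemoveVanished::Entry),
            ("properties", RemoveVanished::Properties),
        ] {
            assert_eq!(serde_plain::from_str::<RemoveVanished>(raw).unwrap(), expected);
        }
    }
}
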
373
pbs-api-types/src/lib.rs
Normal file
@@ -0,0 +1,373 @@
//! Basic API types used by most of the PBS code.

use const_format::concatcp;
use serde::{Deserialize, Serialize};

pub mod percent_encoding;

use proxmox_schema::{
    api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
};
use proxmox_time::parse_daily_duration;

use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};

pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT;
pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX;
pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR;
pub use proxmox_schema::api_types::{
    BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX,
};
pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX};
pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX};
pub use proxmox_schema::api_types::{
    GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX,
};
pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX};
pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX};

pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX};
pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX};
pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX};
pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX};
pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX};
pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX};

pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::HOSTNAME_SCHEMA;
pub use proxmox_schema::api_types::HOST_PORT_SCHEMA;
pub use proxmox_schema::api_types::HTTP_URL_SCHEMA;
pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::NODE_SCHEMA;
pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT;
pub use proxmox_schema::api_types::{
    BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
};
pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT};
pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA};
pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA};
pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA};
pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT};
pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};

use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};

// re-export APT API types
pub use proxmox_apt_api_types::{
    APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryFile,
    APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, APTStandardRepository,
    APTUpdateInfo, APTUpdateOptions,
};

#[rustfmt::skip]
pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";

#[rustfmt::skip]
pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)";

#[rustfmt::skip]
pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z";

#[rustfmt::skip]
pub const BACKUP_NS_RE: &str =
    concatcp!("(?:",
        "(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR,
    ")?");

#[rustfmt::skip]
pub const BACKUP_NS_PATH_RE: &str =
    concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/");

#[rustfmt::skip]
pub const SNAPSHOT_PATH_REGEX_STR: &str =
    concatcp!(
        r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")",
    );

#[rustfmt::skip]
pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str =
    concatcp!(
        r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?",
    );
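
#[cfg(test)]
mod backup_path_regex_tests {
    use super::*;

    // A small sketch; `regex` is already a workspace dependency, so the raw
    // pattern strings above can be compiled and exercised directly.
    #[test]
    fn snapshot_and_group_paths_match_as_documented() {
        let snapshot = regex::Regex::new(&format!("^{SNAPSHOT_PATH_REGEX_STR}$")).unwrap();
        assert!(snapshot.is_match("vm/100/2023-11-05T08:30:00Z"));
        assert!(!snapshot.is_match("vm/100")); // a group alone has no timestamp

        let group_or_snapshot =
            regex::Regex::new(&format!("^{GROUP_OR_SNAPSHOT_PATH_REGEX_STR}$")).unwrap();
        assert!(group_or_snapshot.is_match("vm/100")); // here the time part is optional
    }
}
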
mod acl;
pub use acl::*;

mod datastore;
pub use datastore::*;

mod jobs;
pub use jobs::*;

mod key_derivation;
pub use key_derivation::{Kdf, KeyInfo};

mod maintenance;
pub use maintenance::*;

mod network;
pub use network::*;

mod node;
pub use node::*;

pub use proxmox_auth_api::types as userid;
pub use proxmox_auth_api::types::{Authid, Userid};
pub use proxmox_auth_api::types::{Realm, RealmRef};
pub use proxmox_auth_api::types::{Tokenname, TokennameRef};
pub use proxmox_auth_api::types::{Username, UsernameRef};
pub use proxmox_auth_api::types::{
    PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA,
};

#[macro_use]
mod user;
pub use user::*;

pub use proxmox_schema::upid::*;

mod crypto;
pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint};

pub mod file_restore;

mod openid;
pub use openid::*;

mod ldap;
pub use ldap::*;

mod ad;
pub use ad::*;

mod remote;
pub use remote::*;

mod pathpatterns;
pub use pathpatterns::*;

mod tape;
pub use tape::*;

mod traffic_control;
pub use traffic_control::*;

mod zfs;
pub use zfs::*;

mod metrics;
pub use metrics::*;

mod version;
pub use version::*;

const_regex! {
    // just a rough check - a dummy acceptor is used before persisting
    pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";

    pub BACKUP_REPO_URL_REGEX = concatcp!(
        r"^(?:(?:(",
        USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR,
        ")@)?(",
        DNS_NAME_STR, "|", IPRE_BRACKET_STR,
        "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$"
    );

    pub SUBSCRIPTION_KEY_REGEX = r"^pbs(?:[cbsp])-[0-9a-f]{10}$";
}

pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);

pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);

pub const DAILY_DURATION_FORMAT: ApiStringFormat =
    ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));

pub const SEARCH_DOMAIN_SCHEMA: Schema =
    StringSchema::new("Search domain for host-name lookup.").schema();

pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
    StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
        .schema();

pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
    StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3")
        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
        .schema();

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(8)
    .max_length(64)
    .schema();

pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
    StringSchema::new("Proxmox Backup Server subscription key.")
        .format(&SUBSCRIPTION_KEY_FORMAT)
        .min_length(15)
        .max_length(16)
        .schema();

pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
    "Prevent changes if the current configuration file has a different \
    SHA256 digest. This can be used to prevent concurrent \
    modifications.",
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();

/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);

// Complex type definitions

#[api()]
#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
}

pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(1)
    .max_length(64)
    .schema();

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node power command type.
pub enum NodePowerCommand {
    /// Restart the server
    Reboot,
    /// Shut down the server
    Shutdown,
}

#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The state (result) of a finished worker task.
pub enum TaskStateType {
    /// Ok
    OK,
    /// Warning
    Warning,
    /// Error
    Error,
    /// Unknown
    Unknown,
}

#[api(
    properties: {
        upid: { schema: UPID::API_SCHEMA },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct TaskListItem {
    pub upid: String,
    /// The node name the task is running on.
    pub node: String,
    /// The Unix PID
    pub pid: i64,
    /// The task's process start time (in clock ticks since boot)
    pub pstart: u64,
    /// The task start time (Epoch)
    pub starttime: i64,
    /// Worker type (arbitrary ASCII string)
    pub worker_type: String,
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The authenticated entity who started the task
    pub user: String,
    /// The task end time (Epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub endtime: Option<i64>,
    /// Task end status
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}

pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
};

#[api]
#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// Type of the realm
pub enum RealmType {
    /// The PAM realm
    Pam,
    /// The PBS realm
    Pbs,
    /// An OpenID Connect realm
    OpenId,
    /// An LDAP realm
    Ldap,
    /// An Active Directory (AD) realm
    Ad,
}

serde_plain::derive_display_from_serialize!(RealmType);
serde_plain::derive_fromstr_from_deserialize!(RealmType);
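
#[cfg(test)]
mod realm_type_tests {
    use super::*;

    // A minimal sketch: the serde_plain derives above give Display/FromStr the
    // same lowercase spellings as the serde representation.
    #[test]
    fn realm_type_display_and_parse_agree() {
        assert_eq!(RealmType::OpenId.to_string(), "openid");
        assert!(matches!("ldap".parse::<RealmType>(), Ok(RealmType::Ldap)));
    }
}
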
#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "type": {
            type: RealmType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Deserialize, Serialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a realm
pub struct BasicRealmInfo {
    pub realm: String,
    #[serde(rename = "type")]
    pub ty: RealmType,
    /// True if it is the default realm
    #[serde(skip_serializing_if = "Option::is_none")]
    pub default: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
110
pbs-api-types/src/maintenance.rs
Normal file
@@ -0,0 +1,110 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;

use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};

const_regex! {
    pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}

pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);

pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
    StringSchema::new("Message describing the reason for the maintenance.")
        .format(&MAINTENANCE_MESSAGE_FORMAT)
        .max_length(64)
        .schema();

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
    /// for any read operation like backup restore or RRD metric collection
    Read,
    /// for any write/delete operation, like backup create or GC
    Write,
    /// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
    /// some mutex could be locked (e.g., GC already running?)
    ///
    /// NOTE: one must *not* do any IO operations while only holding this Op state
    Lookup,
    // GarbageCollect or Delete?
}

#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
    // TODO:
    // - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
    //   operation, so that one can enable a mode where nothing new can be added but stuff can be
    //   cleaned
    /// Only read operations are allowed on the datastore.
    ReadOnly,
    /// Neither read nor write operations are allowed on the datastore.
    Offline,
    /// The datastore is being deleted.
    Delete,
    /// The (removable) datastore is being unmounted.
    Unmount,
}
serde_plain::derive_display_from_serialize!(MaintenanceType);
serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);

#[api(
    properties: {
        type: {
            type: MaintenanceType,
        },
        message: {
            optional: true,
            schema: MAINTENANCE_MESSAGE_SCHEMA,
        }
    },
    default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
    /// Type of maintenance (e.g. "read-only" or "offline").
    #[serde(rename = "type")]
    pub ty: MaintenanceType,

    /// Reason for maintenance.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}

impl MaintenanceMode {
    /// Used for deciding whether the datastore is cleared from the internal cache
    pub fn clear_from_cache(&self) -> bool {
        self.ty == MaintenanceType::Offline
            || self.ty == MaintenanceType::Delete
            || self.ty == MaintenanceType::Unmount
    }

    pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
        if self.ty == MaintenanceType::Delete {
            bail!("datastore is being deleted");
        }

        let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
            .decode_utf8()
            .unwrap_or(Cow::Borrowed(""));

        if let Some(Operation::Lookup) = operation {
            return Ok(());
        } else if self.ty == MaintenanceType::Unmount {
            bail!("datastore is being unmounted");
        } else if self.ty == MaintenanceType::Offline {
            bail!("offline maintenance mode: {}", message);
        } else if self.ty == MaintenanceType::ReadOnly {
            if let Some(Operation::Write) = operation {
                bail!("read-only maintenance mode: {}", message);
            }
        }
        Ok(())
    }
}
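
#[cfg(test)]
mod maintenance_mode_tests {
    use super::*;

    // A minimal sketch of the check() semantics implemented above, using only
    // types from this file: read-only mode rejects writes but allows reads and
    // purely logical lookups.
    #[test]
    fn read_only_blocks_writes_but_allows_reads() {
        let mode = MaintenanceMode {
            ty: MaintenanceType::ReadOnly,
            message: None,
        };
        assert!(mode.check(Some(Operation::Read)).is_ok());
        assert!(mode.check(Some(Operation::Lookup)).is_ok());
        assert!(mode.check(Some(Operation::Write)).is_err());
    }
}
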
255
pbs-api-types/src/metrics.rs
Normal file
@@ -0,0 +1,255 @@
use serde::{Deserialize, Serialize};

use crate::{
    HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
use proxmox_schema::{api, Schema, StringSchema, Updater};

pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
    .min_length(3)
    .max_length(32)
    .default("proxmox")
    .schema();

pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
    .min_length(3)
    .max_length(32)
    .default("proxmox")
    .schema();

fn return_true() -> bool {
    true
}

fn is_true(b: &bool) -> bool {
    *b
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        host: {
            schema: HOST_PORT_SCHEMA,
        },
        mtu: {
            type: u16,
            optional: true,
            default: 1500,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (UDP)
pub struct InfluxDbUdp {
    #[updater(skip)]
    pub name: String,
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// The host and port ("host:port")
    pub host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The MTU
    pub mtu: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        url: {
            schema: HTTP_URL_SCHEMA,
        },
        token: {
            type: String,
            optional: true,
        },
        bucket: {
            schema: INFLUXDB_BUCKET_SCHEMA,
            optional: true,
        },
        organization: {
            schema: INFLUXDB_ORGANIZATION_SCHEMA,
            optional: true,
        },
        "max-body-size": {
            type: usize,
            optional: true,
            default: 25_000_000,
        },
        "verify-tls": {
            type: bool,
            optional: true,
            default: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (HTTP(s))
pub struct InfluxDbHttp {
    #[updater(skip)]
    pub name: String,
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// The base URL of the InfluxDB server
    pub url: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) API token
    pub token: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Named location where time series data is stored
    pub bucket: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Workspace for a group of users
    pub organization: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) maximum body size
    pub max_body_size: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// If true, the TLS certificate will be validated.
    pub verify_tls: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
/// Type of the metric server
pub enum MetricServerType {
    /// InfluxDB HTTP
    #[serde(rename = "influxdb-http")]
    InfluxDbHttp,
    /// InfluxDB UDP
    #[serde(rename = "influxdb-udp")]
    InfluxDbUdp,
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        "type": {
            type: MetricServerType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server that's available for all types
pub struct MetricServerInfo {
    pub name: String,
    #[serde(rename = "type")]
    pub ty: MetricServerType,
    /// Enables or disables the metrics server
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    /// The target server
    pub server: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[derive(Clone, Debug, Deserialize, Serialize)]
#[api(
    properties: {
        data: {
            type: Array,
            items: {
                type: MetricDataPoint,
            }
        }
    }
)]
/// Return type for the metric API endpoint
pub struct Metrics {
    /// List of metric data points, sorted by timestamp
    pub data: Vec<MetricDataPoint>,
}

#[api(
    properties: {
        id: {
            type: String,
        },
        metric: {
            type: String,
        },
        timestamp: {
            type: Integer,
        },
    },
)]
/// Metric data point
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct MetricDataPoint {
    /// Unique identifier for this metric object, for instance `node/<nodename>`
    /// or `qemu/<vmid>`.
    pub id: String,

    /// Name of the metric.
    pub metric: String,

    /// Time at which this metric was observed
    pub timestamp: i64,

    #[serde(rename = "type")]
    pub ty: MetricDataType,

    /// Metric value.
    pub value: f64,
}

#[api]
/// Type of the metric.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum MetricDataType {
    /// Gauge.
    Gauge,
    /// Counter.
    Counter,
    /// Derive.
    Derive,
}

serde_plain::derive_display_from_serialize!(MetricDataType);
serde_plain::derive_fromstr_from_deserialize!(MetricDataType);
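
#[cfg(test)]
mod metric_data_type_tests {
    use super::*;

    // A minimal sketch: the serde_plain derives above make the lowercase
    // metric type names round-trip through Display and FromStr.
    #[test]
    fn metric_data_type_round_trips() {
        assert_eq!(MetricDataType::Gauge.to_string(), "gauge");
        assert_eq!("derive".parse::<MetricDataType>().unwrap(), MetricDataType::Derive);
    }
}
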
345
pbs-api-types/src/network.rs
Normal file
@@ -0,0 +1,345 @@
use std::fmt;

use serde::{Deserialize, Serialize};

use proxmox_schema::*;

use crate::{
    CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
    PROXMOX_SAFE_ID_REGEX,
};

pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
    .format(&IP_V4_FORMAT)
    .max_length(15)
    .schema();

pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
    .format(&IP_V6_FORMAT)
    .max_length(39)
    .schema();

pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
    .format(&IP_FORMAT)
    .max_length(39)
    .schema();

pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
    .format(&CIDR_V4_FORMAT)
    .max_length(18)
    .schema();

pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
    .format(&CIDR_V6_FORMAT)
    .max_length(43)
    .schema();

pub const CIDR_SCHEMA: Schema =
    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
        .format(&CIDR_FORMAT)
        .max_length(43)
        .schema();

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
    /// Configuration is done manually using other tools
    Manual,
    /// Define interfaces with statically allocated addresses.
    Static,
    /// Obtain an address via DHCP
    DHCP,
    /// Define the loopback interface.
    Loopback,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
    /// Round-robin policy
    BalanceRr = 0,
    /// Active-backup policy
    ActiveBackup = 1,
    /// XOR policy
    BalanceXor = 2,
    /// Broadcast policy
    Broadcast = 3,
    /// IEEE 802.3ad Dynamic link aggregation
    #[serde(rename = "802.3ad")]
    Ieee802_3ad = 4,
    /// Adaptive transmit load balancing
    BalanceTlb = 5,
    /// Adaptive load balancing
    BalanceAlb = 6,
}

impl fmt::Display for LinuxBondMode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            LinuxBondMode::BalanceRr => "balance-rr",
            LinuxBondMode::ActiveBackup => "active-backup",
            LinuxBondMode::BalanceXor => "balance-xor",
            LinuxBondMode::Broadcast => "broadcast",
            LinuxBondMode::Ieee802_3ad => "802.3ad",
            LinuxBondMode::BalanceTlb => "balance-tlb",
            LinuxBondMode::BalanceAlb => "balance-alb",
        })
    }
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
    /// Layer 2
    Layer2 = 0,
    /// Layer 2+3
    #[serde(rename = "layer2+3")]
    Layer2_3 = 1,
    /// Layer 3+4
    #[serde(rename = "layer3+4")]
    Layer3_4 = 2,
}

impl fmt::Display for BondXmitHashPolicy {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            BondXmitHashPolicy::Layer2 => "layer2",
            BondXmitHashPolicy::Layer2_3 => "layer2+3",
            BondXmitHashPolicy::Layer3_4 => "layer3+4",
        })
    }
}
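
#[cfg(test)]
mod bond_display_tests {
    use super::*;

    // A minimal sketch: the Display impls above must emit the exact
    // kernel/ifupdown spellings, including the serde-renamed "802.3ad" and
    // "layer2+3" forms.
    #[test]
    fn bond_names_render_like_their_serde_spellings() {
        assert_eq!(LinuxBondMode::Ieee802_3ad.to_string(), "802.3ad");
        assert_eq!(LinuxBondMode::ActiveBackup.to_string(), "active-backup");
        assert_eq!(BondXmitHashPolicy::Layer2_3.to_string(), "layer2+3");
    }
}
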
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
    /// Loopback
    Loopback,
    /// Physical Ethernet device
    Eth,
    /// Linux Bridge
    Bridge,
    /// Linux Bond
    Bond,
    /// Linux VLAN (eth.10)
    Vlan,
    /// Interface Alias (eth:1)
    Alias,
    /// Unknown interface type
    Unknown,
}

pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
    .format(&NETWORK_INTERFACE_FORMAT)
    .min_length(1)
    .max_length(15) // libc::IFNAMSIZ-1
    .schema();

pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();

pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
    StringSchema::new("A list of network devices, comma separated.")
        .format(&ApiStringFormat::PropertyString(
            &NETWORK_INTERFACE_ARRAY_SCHEMA,
        ))
        .schema();

#[api(
    properties: {
        name: {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
        },
        "type": {
            type: NetworkInterfaceType,
        },
        method: {
            type: NetworkConfigMethod,
            optional: true,
        },
        method6: {
            type: NetworkConfigMethod,
            optional: true,
        },
        cidr: {
            schema: CIDR_V4_SCHEMA,
            optional: true,
        },
        cidr6: {
            schema: CIDR_V6_SCHEMA,
            optional: true,
        },
        gateway: {
            schema: IP_V4_SCHEMA,
            optional: true,
        },
        gateway6: {
            schema: IP_V6_SCHEMA,
            optional: true,
        },
        options: {
            description: "Option list (inet)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        options6: {
            description: "Option list (inet6)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        comments: {
            description: "Comments (inet, may span multiple lines)",
            type: String,
            optional: true,
        },
        comments6: {
            description: "Comments (inet6, may span multiple lines)",
            type: String,
            optional: true,
        },
        bridge_ports: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        slaves: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        "vlan-id": {
            description: "VLAN ID.",
            type: u16,
            optional: true,
        },
        "vlan-raw-device": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_mode: {
            type: LinuxBondMode,
            optional: true,
        },
        "bond-primary": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_xmit_hash_policy: {
            type: BondXmitHashPolicy,
            optional: true,
        },
    }
)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
/// Network Interface configuration
pub struct Interface {
    /// Autostart interface
    #[serde(rename = "autostart")]
    pub autostart: bool,
    /// Interface is active (UP)
    pub active: bool,
    /// Interface name
    pub name: String,
    /// Interface type
    #[serde(rename = "type")]
    pub interface_type: NetworkInterfaceType,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method6: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 address with netmask
    pub cidr: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 gateway
    pub gateway: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 address with netmask
    pub cidr6: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 gateway
    pub gateway6: Option<String>,

    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options6: Vec<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments6: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    /// Maximum Transmission Unit
    pub mtu: Option<u64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_ports: Option<Vec<String>>,
    /// Enable bridge vlan support.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_vlan_aware: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-id")]
    pub vlan_id: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-raw-device")]
    pub vlan_raw_device: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub slaves: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_mode: Option<LinuxBondMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "bond-primary")]
    pub bond_primary: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}

impl Interface {
    pub fn new(name: String) -> Self {
        Self {
            name,
            interface_type: NetworkInterfaceType::Unknown,
            autostart: false,
            active: false,
            method: None,
            method6: None,
            cidr: None,
            gateway: None,
            cidr6: None,
            gateway6: None,
            options: Vec::new(),
            options6: Vec::new(),
            comments: None,
            comments6: None,
            mtu: None,
            bridge_ports: None,
            bridge_vlan_aware: None,
            vlan_id: None,
            vlan_raw_device: None,
            slaves: None,
            bond_mode: None,
            bond_primary: None,
            bond_xmit_hash_policy: None,
        }
    }
}
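
#[cfg(test)]
mod interface_tests {
    use super::*;

    // A minimal sketch using only this file's types: new() starts from an
    // inactive, unconfigured interface of unknown type.
    #[test]
    fn new_interface_defaults() {
        let iface = Interface::new("eth0".to_string());
        assert_eq!(iface.name, "eth0");
        assert_eq!(iface.interface_type, NetworkInterfaceType::Unknown);
        assert!(!iface.active && !iface.autostart);
        assert!(iface.cidr.is_none() && iface.slaves.is_none());
    }
}
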
162
pbs-api-types/src/node.rs
Normal file
@@ -0,0 +1,162 @@
use std::ffi::OsStr;

use proxmox_schema::*;
use serde::{Deserialize, Serialize};

use crate::StorageStatus;

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
pub struct NodeMemoryCounters {
    /// Total memory
    pub total: u64,
    /// Used memory
    pub used: u64,
    /// Free memory
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
    /// Total swap
    pub total: u64,
    /// Used swap
    pub used: u64,
    /// Free swap
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint
pub struct NodeInformation {
    /// The SSL Fingerprint
    pub fingerprint: String,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
/// The current kernel version (output of `uname`)
pub struct KernelVersionInformation {
    /// The system name/node name
    pub sysname: String,
    /// The kernel release number
    pub release: String,
    /// The kernel version
    pub version: String,
    /// The machine architecture
    pub machine: String,
}

impl KernelVersionInformation {
    pub fn from_uname_parts(
        sysname: &OsStr,
        release: &OsStr,
        version: &OsStr,
        machine: &OsStr,
    ) -> Self {
        KernelVersionInformation {
            sysname: sysname.to_str().map(String::from).unwrap_or_default(),
            release: release.to_str().map(String::from).unwrap_or_default(),
            version: version.to_str().map(String::from).unwrap_or_default(),
            machine: machine.to_str().map(String::from).unwrap_or_default(),
        }
    }

    pub fn get_legacy(&self) -> String {
        format!("{} {} {}", self.sysname, self.release, self.version)
    }
}
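
#[cfg(test)]
mod kernel_version_tests {
    use std::ffi::OsStr;

    use super::KernelVersionInformation;

    // A minimal sketch: from_uname_parts() stringifies the four uname fields,
    // and get_legacy() reproduces the old single-string format.
    #[test]
    fn legacy_kernel_string_concatenates_uname_fields() {
        let info = KernelVersionInformation::from_uname_parts(
            OsStr::new("Linux"),
            OsStr::new("6.2.16-3-pve"),
            OsStr::new("#1 SMP PREEMPT_DYNAMIC"),
            OsStr::new("x86_64"),
        );
        assert_eq!(info.get_legacy(), "Linux 6.2.16-3-pve #1 SMP PREEMPT_DYNAMIC");
    }
}
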
#[api]
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "kebab-case")]
/// The possible boot modes
pub enum BootMode {
    /// The boot mode is EFI/UEFI
    Efi,
    /// The boot mode is legacy BIOS
    LegacyBios,
}

#[api]
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Holds the boot mode information
pub struct BootModeInformation {
    /// The boot mode, either Efi or LegacyBios
    pub mode: BootMode,
    /// Secure Boot status
    pub secureboot: bool,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
    /// The CPU model
    pub model: String,
    /// The number of CPU sockets
    pub sockets: usize,
    /// The number of CPU cores (incl. threads)
    pub cpus: usize,
}

#[api(
    properties: {
        memory: {
            type: NodeMemoryCounters,
        },
        root: {
            type: StorageStatus,
        },
        swap: {
            type: NodeSwapCounters,
        },
        loadavg: {
            type: Array,
            items: {
                type: Number,
                description: "the load",
            }
        },
        cpuinfo: {
            type: NodeCpuInformation,
        },
        info: {
            type: NodeInformation,
        }
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// The node status
pub struct NodeStatus {
    pub memory: NodeMemoryCounters,
    pub root: StorageStatus,
    pub swap: NodeSwapCounters,
    /// The current uptime of the server (in seconds).
    pub uptime: u64,
    /// Load for 1, 5 and 15 minutes.
    pub loadavg: [f64; 3],
    /// The current kernel version (NEW struct type).
    pub current_kernel: KernelVersionInformation,
    /// The current kernel version (LEGACY string type).
    pub kversion: String,
    /// Total CPU usage since last query.
    pub cpu: f64,
    /// Total IO wait since last query.
    pub wait: f64,
    pub cpuinfo: NodeCpuInformation,
    pub info: NodeInformation,
    /// Current boot mode
    pub boot_info: BootModeInformation,
}
120
pbs-api-types/src/openid.rs
Normal file
@@ -0,0 +1,120 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};

use super::{
    GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA,
    SINGLE_LINE_COMMENT_SCHEMA,
};

pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
    .format(&OPENID_SCOPE_FORMAT)
    .schema();

pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();

pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);

pub const OPENID_DEFAULT_SCOPE_LIST: &str = "email profile";

pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
    .format(&OPENID_SCOPE_LIST_FORMAT)
    .default(OPENID_DEFAULT_SCOPE_LIST)
    .schema();

pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX);

pub const OPENID_ACR_SCHEMA: Schema =
    StringSchema::new("OpenID Authentication Context Class Reference.")
        .format(&OPENID_ACR_FORMAT)
        .schema();

pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();

pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);

pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
    .format(&OPENID_ACR_LIST_FORMAT)
    .schema();

pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
    "Use the value of this attribute/claim as unique user name. It \
    is up to the identity provider to guarantee the uniqueness. The \
    OpenID specification only guarantees that Subject ('sub') is \
    unique. Also make sure that the user is not allowed to change that \
    attribute themselves!",
)
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();

#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "client-key": {
            optional: true,
        },
        "scopes": {
            schema: OPENID_SCOPE_LIST_SCHEMA,
            optional: true,
        },
        "acr-values": {
            schema: OPENID_ACR_LIST_SCHEMA,
            optional: true,
        },
        prompt: {
            description: "OpenID Prompt",
            type: String,
            format: &PROXMOX_SAFE_ID_FORMAT,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        autocreate: {
            optional: true,
            default: false,
        },
        "username-claim": {
            schema: OPENID_USERNAME_CLAIM_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// OpenID Issuer URL
    pub issuer_url: String,
    /// OpenID Client ID
    pub client_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scopes: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acr_values: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<String>,
    /// OpenID Client Key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub client_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Automatically create users if they do not exist.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub autocreate: Option<bool>,
    #[updater(skip)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub username_claim: Option<String>,
}
30
pbs-api-types/src/pathpatterns.rs
Normal file
@@ -0,0 +1,30 @@
use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema};

use serde::{Deserialize, Serialize};

const_regex! {
    pub PATH_PATTERN_REGEX = r"^.+[^\\]$";
}

pub const PATH_PATTERN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PATH_PATTERN_REGEX);

pub const PATH_PATTERN_SCHEMA: Schema =
    StringSchema::new("Path or match pattern for matching filenames.")
        .format(&PATH_PATTERN_FORMAT)
        .schema();

#[derive(Default, Deserialize, Serialize)]
/// Path or path pattern for filename matching
pub struct PathPattern {
    pattern: String,
}

impl ApiType for PathPattern {
    const API_SCHEMA: Schema = PATH_PATTERN_SCHEMA;
}

impl AsRef<[u8]> for PathPattern {
    fn as_ref(&self) -> &[u8] {
        self.pattern.as_bytes()
    }
}
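
#[cfg(test)]
mod path_pattern_tests {
    use super::*;

    // A minimal sketch: the child test module can see the private `pattern`
    // field, so the AsRef<[u8]> view can be checked without a constructor.
    #[test]
    fn path_pattern_exposes_raw_bytes() {
        let pattern = PathPattern {
            pattern: "etc/**".to_string(),
        };
        assert_eq!(pattern.as_ref(), b"etc/**");
    }
}
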
22
pbs-api-types/src/percent_encoding.rs
Normal file
@@ -0,0 +1,22 @@
use percent_encoding::{utf8_percent_encode, AsciiSet};

/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f and 7f
    // the SIMPLE_ENCODE_SET adds space and 0x7f (0x7f itself is already included above)
    .add(0x20)
    .add(0x7f)
    // the DEFAULT_ENCODE_SET added:
    .add(b' ')
    .add(b'"')
    .add(b'#')
    .add(b'<')
    .add(b'>')
    .add(b'`')
    .add(b'?')
    .add(b'{')
    .add(b'}');

/// Percent-encode a URL component.
pub fn percent_encode_component(comp: &str) -> String {
    utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
}
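
#[cfg(test)]
mod percent_encoding_tests {
    use super::percent_encode_component;

    // A minimal sketch: NON_ALPHANUMERIC escapes everything outside
    // [0-9A-Za-z], which is what API path components need.
    #[test]
    fn component_encoding_escapes_separators_and_spaces() {
        assert_eq!(percent_encode_component("vm/100 test"), "vm%2F100%20test");
    }
}
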
106
pbs-api-types/src/remote.rs
Normal file
@@ -0,0 +1,106 @@
use serde::{Deserialize, Serialize};

use super::*;
use proxmox_schema::*;

pub const REMOTE_PASSWORD_SCHEMA: Schema =
    StringSchema::new("Password or auth token for remote host.")
        .format(&PASSWORD_FORMAT)
        .min_length(1)
        .max_length(1024)
        .schema();

pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
    StringSchema::new("Password or auth token for remote host (stored as base64 string).")
        .format(&PASSWORD_FORMAT)
        .min_length(1)
        .max_length(1024)
        .schema();

pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[api(
    properties: {
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        host: {
            schema: DNS_NAME_OR_IP_SCHEMA,
        },
        port: {
            optional: true,
            description: "The (optional) port",
            type: u16,
        },
        "auth-id": {
            type: Authid,
        },
        fingerprint: {
            optional: true,
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote configuration properties.
pub struct RemoteConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    pub host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    pub auth_id: Authid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<String>,
}

#[api(
    properties: {
        name: {
            schema: REMOTE_ID_SCHEMA,
        },
        config: {
            type: RemoteConfig,
        },
        password: {
            schema: REMOTE_PASSWORD_BASE64_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct Remote {
    pub name: String,
    // Note: The stored password is base64 encoded
    #[serde(default, skip_serializing_if = "String::is_empty")]
    #[serde(with = "proxmox_serde::string_as_base64")]
    pub password: String,
    #[serde(flatten)]
    pub config: RemoteConfig,
}

#[api(
    properties: {
        name: {
            schema: REMOTE_ID_SCHEMA,
        },
        config: {
            type: RemoteConfig,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote properties (without the password).
pub struct RemoteWithoutPassword {
    pub name: String,
    #[serde(flatten)]
    pub config: RemoteConfig,
}
134
pbs-api-types/src/tape/changer.rs
Normal file
@@ -0,0 +1,134 @@
//! Types for tape changer API

use serde::{Deserialize, Serialize};

use proxmox_schema::{
    api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
};

use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};

pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
    StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();

pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Slot list.",
    &IntegerSchema::new("Slot number").minimum(1).schema(),
)
.schema();

pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
    "\
A list of slot numbers, comma separated. Those slots are reserved for
Import/Export, i.e. any media in those slots is considered to be
'offline'.
",
)
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
.schema();

#[api(
    properties: {
        name: {
            schema: CHANGER_NAME_SCHEMA,
        },
        path: {
            schema: SCSI_CHANGER_PATH_SCHEMA,
        },
        "export-slots": {
            schema: EXPORT_SLOT_LIST_SCHEMA,
            optional: true,
        },
        "eject-before-unload": {
            optional: true,
            default: false,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// SCSI tape changer
pub struct ScsiTapeChanger {
    #[updater(skip)]
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub export_slots: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// If set to true, tapes are ejected from the drive before unloading
    pub eject_before_unload: Option<bool>,
}

#[api(
    properties: {
        config: {
            type: ScsiTapeChanger,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Changer config with optional device identification attributes
pub struct ChangerListEntry {
    #[serde(flatten)]
    pub config: ScsiTapeChanger,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Entry Kind
pub enum MtxEntryKind {
    /// Drive
    Drive,
    /// Slot
    Slot,
    /// Import/Export Slot
    ImportExport,
}

#[api(
    properties: {
        "entry-kind": {
            type: MtxEntryKind,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Status Entry
pub struct MtxStatusEntry {
    pub entry_kind: MtxEntryKind,
    /// The ID of the slot or drive
    pub entry_id: u64,
    /// The media label (volume tag) if the slot/drive is full
    #[serde(skip_serializing_if = "Option::is_none")]
    pub label_text: Option<String>,
    /// The slot the drive was loaded from
    #[serde(skip_serializing_if = "Option::is_none")]
    pub loaded_slot: Option<u64>,
    /// The current state of the drive
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
}
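
#[cfg(test)]
mod mtx_entry_kind_tests {
    use super::*;

    // A minimal sketch via the crate's `serde_plain` dependency: entry kinds
    // use their kebab-case spellings ("drive", "slot", "import-export") on
    // the wire.
    #[test]
    fn entry_kind_uses_kebab_case_on_the_wire() {
        assert_eq!(
            serde_plain::to_string(&MtxEntryKind::ImportExport).unwrap(),
            "import-export"
        );
    }
}
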
55
pbs-api-types/src/tape/device.rs
Normal file
@@ -0,0 +1,55 @@
use ::serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Optional Device Identification Attributes
pub struct OptionalDeviceIdentification {
    /// Vendor (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vendor: Option<String>,
    /// Model (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Serial number (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub serial: Option<String>,
}

#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Kind of device
pub enum DeviceKind {
    /// Tape changer (autoloader, robot)
    Changer,
    /// Normal SCSI tape device
    Tape,
}

#[api(
    properties: {
        kind: {
            type: DeviceKind,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
/// Tape device information
pub struct TapeDeviceInfo {
    pub kind: DeviceKind,
    /// Path to the Linux device node
    pub path: String,
    /// Serial number (autodetected)
    pub serial: String,
    /// Vendor (autodetected)
    pub vendor: String,
    /// Model (autodetected)
    pub model: String,
    /// Device major number
    pub major: u32,
    /// Device minor number
    pub minor: u32,
}
350
pbs-api-types/src/tape/drive.rs
Normal file
@@ -0,0 +1,350 @@
//! Types for tape drive API
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};

use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};

pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const LTO_DRIVE_PATH_SCHEMA: Schema =
    StringSchema::new("The path to an LTO SCSI-generic tape device (e.g. '/dev/sg0')").schema();

pub const CHANGER_DRIVENUM_SCHEMA: Schema =
    IntegerSchema::new("Associated changer drive number (requires option changer)")
        .minimum(0)
        .maximum(255)
        .default(0)
        .schema();

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        }
    }
)]
#[derive(Serialize, Deserialize)]
/// Simulate tape drives (only for test and debug)
#[serde(rename_all = "kebab-case")]
pub struct VirtualTapeDrive {
    pub name: String,
    /// Path to directory
    pub path: String,
    /// Virtual tape size
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_size: Option<usize>,
}

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        },
        path: {
            schema: LTO_DRIVE_PATH_SCHEMA,
        },
        changer: {
            schema: CHANGER_NAME_SCHEMA,
            optional: true,
        },
        "changer-drivenum": {
            schema: CHANGER_DRIVENUM_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// Lto SCSI tape drive
pub struct LtoTapeDrive {
    #[updater(skip)]
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub changer: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub changer_drivenum: Option<u64>,
}

#[api(
    properties: {
        config: {
            type: LtoTapeDrive,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive list entry
pub struct DriveListEntry {
    #[serde(flatten)]
    pub config: LtoTapeDrive,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
    /// the state of the drive if locked
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub activity: Option<DeviceActivity>,
}

#[api()]
#[derive(Serialize, Deserialize)]
/// Medium auxiliary memory attributes (MAM)
pub struct MamAttribute {
    /// Attribute id
    pub id: u16,
    /// Attribute name
    pub name: String,
    /// Attribute value
    pub value: String,
}

#[api()]
#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
/// The density of a tape medium, derived from the LTO version.
pub enum TapeDensity {
    /// Unknown (no media loaded)
    Unknown,
    /// LTO1
    LTO1,
    /// LTO2
    LTO2,
    /// LTO3
    LTO3,
    /// LTO4
    LTO4,
    /// LTO5
    LTO5,
    /// LTO6
    LTO6,
    /// LTO7
    LTO7,
    /// LTO7M8
    LTO7M8,
    /// LTO8
    LTO8,
    /// LTO9
    LTO9,
}

impl TryFrom<u8> for TapeDensity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        let density = match value {
            0x00 => TapeDensity::Unknown,
            0x40 => TapeDensity::LTO1,
            0x42 => TapeDensity::LTO2,
            0x44 => TapeDensity::LTO3,
            0x46 => TapeDensity::LTO4,
            0x58 => TapeDensity::LTO5,
            0x5a => TapeDensity::LTO6,
            0x5c => TapeDensity::LTO7,
            0x5d => TapeDensity::LTO7M8,
            0x5e => TapeDensity::LTO8,
            0x60 => TapeDensity::LTO9,
            _ => bail!("unknown tape density code 0x{:02x}", value),
        };
        Ok(density)
    }
}
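The `TryFrom` impl maps the SCSI density codes reported by the drive onto LTO generations, and the derived `PartialOrd` orders the variants by declaration, i.e. by generation. A minimal sketch (assuming `pbs_api_types` and `anyhow` as dependencies; the codes are the ones from the match above):

```rust
use pbs_api_types::TapeDensity;

fn main() -> Result<(), anyhow::Error> {
    // 0x5e is the density code reported for LTO-8 media.
    let density = TapeDensity::try_from(0x5eu8)?;
    assert_eq!(density, TapeDensity::LTO8);

    // The derived PartialOrd follows declaration order, so newer
    // generations compare as greater.
    assert!(TapeDensity::LTO5 < TapeDensity::LTO9);

    // Unknown codes are rejected with an error instead of a panic.
    assert!(TapeDensity::try_from(0xffu8).is_err());
    Ok(())
}
```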
#[api(
    properties: {
        density: {
            type: TapeDensity,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive/Media status for Lto SCSI drives.
///
/// Media related data is optional - only set if there is a medium
/// loaded.
pub struct LtoDriveAndMediaStatus {
    /// Vendor
    pub vendor: String,
    /// Product
    pub product: String,
    /// Revision
    pub revision: String,
    /// Block size (0 is variable size)
    pub blocksize: u32,
    /// Compression enabled
    pub compression: bool,
    /// Drive buffer mode
    pub buffer_mode: u8,
    /// Tape density
    pub density: TapeDensity,
    /// Media is write protected
    #[serde(skip_serializing_if = "Option::is_none")]
    pub write_protect: Option<bool>,
    /// Tape Alert Flags
    #[serde(skip_serializing_if = "Option::is_none")]
    pub alert_flags: Option<String>,
    /// Current file number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_number: Option<u64>,
    /// Current block number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_number: Option<u64>,
    /// Medium Manufacture Date (epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub manufactured: Option<i64>,
    /// Total Bytes Read in Medium Life
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_read: Option<u64>,
    /// Total Bytes Written in Medium Life
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_written: Option<u64>,
    /// Number of mounts for the current volume (i.e., Thread Count)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub volume_mounts: Option<u64>,
    /// Count of the total number of times the medium has passed over
    /// the head.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub medium_passes: Option<u64>,
    /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub medium_wearout: Option<f64>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub drive_activity: Option<DeviceActivity>,
}

#[api()]
/// Volume statistics from SCSI log page 17h
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Lp17VolumeStatistics {
    /// Volume mounts (thread count)
    pub volume_mounts: u64,
    /// Total data sets written
    pub volume_datasets_written: u64,
    /// Write retries
    pub volume_recovered_write_data_errors: u64,
    /// Total unrecovered write errors
    pub volume_unrecovered_write_data_errors: u64,
    /// Total suspended writes
    pub volume_write_servo_errors: u64,
    /// Total fatal suspended writes
    pub volume_unrecovered_write_servo_errors: u64,
    /// Total datasets read
    pub volume_datasets_read: u64,
    /// Total read retries
    pub volume_recovered_read_errors: u64,
    /// Total unrecovered read errors
    pub volume_unrecovered_read_errors: u64,
    /// Last mount unrecovered write errors
    pub last_mount_unrecovered_write_errors: u64,
    /// Last mount unrecovered read errors
    pub last_mount_unrecovered_read_errors: u64,
    /// Last mount bytes written
    pub last_mount_bytes_written: u64,
    /// Last mount bytes read
    pub last_mount_bytes_read: u64,
    /// Lifetime bytes written
    pub lifetime_bytes_written: u64,
    /// Lifetime bytes read
    pub lifetime_bytes_read: u64,
    /// Last load write compression ratio
    pub last_load_write_compression_ratio: u64,
    /// Last load read compression ratio
    pub last_load_read_compression_ratio: u64,
    /// Medium mount time
    pub medium_mount_time: u64,
    /// Medium ready time
    pub medium_ready_time: u64,
    /// Total native capacity
    pub total_native_capacity: u64,
    /// Total used native capacity
    pub total_used_native_capacity: u64,
    /// Write protect
    pub write_protect: bool,
    /// Volume is WORM
    pub worm: bool,
    /// Beginning of medium passes
    pub beginning_of_medium_passes: u64,
    /// Middle of medium passes
    pub middle_of_tape_passes: u64,
    /// Volume serial number
    pub serial: String,
}

/// The DT Device Activity from DT Device Status LP page
#[api]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum DeviceActivity {
    /// No activity
    NoActivity,
    /// Cleaning
    Cleaning,
    /// Loading
    Loading,
    /// Unloading
    Unloading,
    /// Other unspecified activity
    Other,
    /// Reading
    Reading,
    /// Writing
    Writing,
    /// Locating
    Locating,
    /// Rewinding
    Rewinding,
    /// Erasing
    Erasing,
    /// Formatting
    Formatting,
    /// Calibrating
    Calibrating,
    /// Other (DT)
    OtherDT,
    /// Updating microcode
    MicrocodeUpdate,
    /// Reading encrypted data
    ReadingEncrypted,
    /// Writing encrypted data
    WritingEncrypted,
}

impl TryFrom<u8> for DeviceActivity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Ok(match value {
            0x00 => DeviceActivity::NoActivity,
            0x01 => DeviceActivity::Cleaning,
            0x02 => DeviceActivity::Loading,
            0x03 => DeviceActivity::Unloading,
            0x04 => DeviceActivity::Other,
            0x05 => DeviceActivity::Reading,
            0x06 => DeviceActivity::Writing,
            0x07 => DeviceActivity::Locating,
            0x08 => DeviceActivity::Rewinding,
            0x09 => DeviceActivity::Erasing,
            0x0A => DeviceActivity::Formatting,
            0x0B => DeviceActivity::Calibrating,
            0x0C => DeviceActivity::OtherDT,
            0x0D => DeviceActivity::MicrocodeUpdate,
            0x0E => DeviceActivity::ReadingEncrypted,
            0x0F => DeviceActivity::WritingEncrypted,
            other => bail!("invalid DT device activity value: {:x}", other),
        })
    }
}
179
pbs-api-types/src/tape/media.rs
Normal file
@ -0,0 +1,179 @@
use ::serde::{Deserialize, Serialize};

use proxmox_schema::*;
use proxmox_uuid::Uuid;

use crate::{MediaLocation, MediaStatus, UUID_FORMAT};

pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new(
    "MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).",
)
.format(&UUID_FORMAT)
.schema();

pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
    .format(&UUID_FORMAT)
    .schema();

#[api(
    properties: {
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media Set list entry
pub struct MediaSetListEntry {
    /// Media set name
    pub media_set_name: String,
    pub media_set_uuid: Uuid,
    /// MediaSet creation time stamp
    pub media_set_ctime: i64,
    /// Media Pool
    pub pool: String,
}

#[api(
    properties: {
        location: {
            type: MediaLocation,
        },
        status: {
            type: MediaStatus,
        },
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media list entry
pub struct MediaListEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    pub uuid: Uuid,
    /// Creation time stamp
    pub ctime: i64,
    pub location: MediaLocation,
    pub status: MediaStatus,
    /// Expired flag
    pub expired: bool,
    /// Catalog status OK
    pub catalog: bool,
    /// Media set name
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// Media set seq_nr
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet creation time stamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Media Pool
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    /// Bytes currently used
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_used: Option<u64>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media label info
pub struct MediaIdFlat {
    /// Unique ID
    pub uuid: Uuid,
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Creation time stamp
    pub ctime: i64,
    // All MediaSet properties are optional here
    /// MediaSet Pool
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// MediaSet media sequence number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet Creation time stamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Encryption key fingerprint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_key_fingerprint: Option<String>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Label with optional Uuid
pub struct LabelUuidMap {
    /// Changer label text (or Barcode)
    pub label_text: String,
    /// Associated Uuid (if any)
    pub uuid: Option<Uuid>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Media content list entry
pub struct MediaContentEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Media Uuid
    pub uuid: Uuid,
    /// Media set name
    pub media_set_name: String,
    /// Media set uuid
    pub media_set_uuid: Uuid,
    /// MediaSet Creation time stamp
    pub media_set_ctime: i64,
    /// Media set seq_nr
    pub seq_nr: u64,
    /// Media Pool
    pub pool: String,
    /// Datastore Name
    pub store: String,
    /// Backup snapshot
    pub snapshot: String,
    /// Snapshot creation time (epoch)
    pub backup_time: i64,
}
80
pbs-api-types/src/tape/media_location.rs
Normal file
@ -0,0 +1,80 @@
use anyhow::{bail, Error};

use proxmox_schema::{ApiStringFormat, Schema, StringSchema};

use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};

pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[derive(Debug, PartialEq, Eq, Clone)]
/// Media location
pub enum MediaLocation {
    /// Ready for use (inside tape library)
    Online(String),
    /// Locally available, but needs to be mounted (insert into tape
    /// drive)
    Offline,
    /// Media is inside a Vault
    Vault(String),
}

proxmox_serde::forward_deserialize_to_from_str!(MediaLocation);
proxmox_serde::forward_serialize_to_display!(MediaLocation);

impl proxmox_schema::ApiType for MediaLocation {
    const API_SCHEMA: Schema = StringSchema::new(
        "Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
    )
    .format(&ApiStringFormat::VerifyFn(|text| {
        let location: MediaLocation = text.parse()?;
        match location {
            MediaLocation::Online(ref changer) => {
                CHANGER_NAME_SCHEMA.parse_simple_value(changer)?;
            }
            MediaLocation::Vault(ref vault) => {
                VAULT_NAME_SCHEMA.parse_simple_value(vault)?;
            }
            MediaLocation::Offline => { /* OK */ }
        }
        Ok(())
    }))
    .schema();
}

impl std::fmt::Display for MediaLocation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MediaLocation::Offline => {
                write!(f, "offline")
            }
            MediaLocation::Online(changer) => {
                write!(f, "online-{}", changer)
            }
            MediaLocation::Vault(vault) => {
                write!(f, "vault-{}", vault)
            }
        }
    }
}

impl std::str::FromStr for MediaLocation {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "offline" {
            return Ok(MediaLocation::Offline);
        }
        if let Some(changer) = s.strip_prefix("online-") {
            return Ok(MediaLocation::Online(changer.to_string()));
        }
        if let Some(vault) = s.strip_prefix("vault-") {
            return Ok(MediaLocation::Vault(vault.to_string()));
        }

        bail!("MediaLocation parse error");
    }
}
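`Display` and `FromStr` implement the same 'offline' / 'online-<changer>' / 'vault-<vault>' encoding, which is what the `forward_*` macros and the `VerifyFn` above rely on. A minimal round-trip sketch (assuming `pbs_api_types` and `anyhow` as dependencies; the changer name is illustrative):

```rust
use pbs_api_types::MediaLocation;

fn main() -> Result<(), anyhow::Error> {
    // FromStr and Display agree on the string encoding, so values
    // survive a round-trip through the API's string representation.
    let location: MediaLocation = "online-changer0".parse()?;
    assert_eq!(location, MediaLocation::Online("changer0".to_string()));
    assert_eq!(location.to_string(), "online-changer0");

    // Anything without a known prefix is rejected.
    assert!("somewhere-else".parse::<MediaLocation>().is_err());
    Ok(())
}
```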
161
pbs-api-types/src/tape/media_pool.rs
Normal file
@ -0,0 +1,161 @@
//! Types for tape media pool API
//!
//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
//! so we cannot use them directly for the API. Instead, we represent
//! them as String.

use std::str::FromStr;

use anyhow::Error;
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater};

use proxmox_time::{CalendarEvent, TimeSpan};

use crate::{
    PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
    TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
};

pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
    "Media set naming template (may contain strftime() time format specifications).",
)
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();

pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
    MediaSetPolicy::from_str(s)?;
    Ok(())
});

pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema =
    StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).")
        .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
        .schema();

/// Media set allocation policy
pub enum MediaSetPolicy {
    /// Try to use the current media set
    ContinueCurrent,
    /// Each backup job creates a new media set
    AlwaysCreate,
    /// Create a new set when the specified CalendarEvent triggers
    CreateAt(CalendarEvent),
}

impl std::str::FromStr for MediaSetPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "continue" {
            return Ok(MediaSetPolicy::ContinueCurrent);
        }
        if s == "always" {
            return Ok(MediaSetPolicy::AlwaysCreate);
        }

        let event = s.parse()?;

        Ok(MediaSetPolicy::CreateAt(event))
    }
}

pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
    RetentionPolicy::from_str(s)?;
    Ok(())
});

pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema =
    StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).")
        .format(&MEDIA_RETENTION_POLICY_FORMAT)
        .schema();

/// Media retention Policy
pub enum RetentionPolicy {
    /// Always overwrite media
    OverwriteAlways,
    /// Protect data for the timespan specified
    ProtectFor(TimeSpan),
    /// Never overwrite data
    KeepForever,
}

impl std::str::FromStr for RetentionPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "overwrite" {
            return Ok(RetentionPolicy::OverwriteAlways);
        }
        if s == "keep" {
            return Ok(RetentionPolicy::KeepForever);
        }

        let time_span = s.parse()?;

        Ok(RetentionPolicy::ProtectFor(time_span))
    }
}
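This is the string representation the module comment refers to: the fixed keywords map to the simple variants, and anything else is delegated to the CalendarEvent or TimeSpan parser, so the config file can stay a plain string. A minimal sketch (assuming `pbs_api_types` and `anyhow` as dependencies; the event and time-span strings are illustrative inputs for proxmox_time's parsers):

```rust
use std::str::FromStr;

use pbs_api_types::{MediaSetPolicy, RetentionPolicy};

fn main() -> Result<(), anyhow::Error> {
    // The fixed keywords map to the simple variants ...
    assert!(matches!(
        MediaSetPolicy::from_str("always")?,
        MediaSetPolicy::AlwaysCreate
    ));
    assert!(matches!(
        RetentionPolicy::from_str("keep")?,
        RetentionPolicy::KeepForever
    ));

    // ... everything else goes through the CalendarEvent/TimeSpan parsers.
    assert!(matches!(
        MediaSetPolicy::from_str("mon 08:00")?,
        MediaSetPolicy::CreateAt(_)
    ));
    assert!(matches!(
        RetentionPolicy::from_str("7d")?,
        RetentionPolicy::ProtectFor(_)
    ));
    Ok(())
}
```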
#[api(
    properties: {
        name: {
            schema: MEDIA_POOL_NAME_SCHEMA,
        },
        allocation: {
            schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
            optional: true,
        },
        retention: {
            schema: MEDIA_RETENTION_POLICY_SCHEMA,
            optional: true,
        },
        template: {
            schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
            optional: true,
        },
        encrypt: {
            schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
/// Media pool configuration
pub struct MediaPoolConfig {
    /// The pool name
    #[updater(skip)]
    pub name: String,
    /// Media Set allocation policy
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allocation: Option<String>,
    /// Media retention policy
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retention: Option<String>,
    /// Media set naming template (default "%c")
    ///
    /// The template is UTF8 text, and can include strftime time
    /// format specifications.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub template: Option<String>,
    /// Encryption key fingerprint
    ///
    /// If set, encrypt all data using the specified key.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypt: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
21
pbs-api-types/src/tape/media_status.rs
Normal file
@ -0,0 +1,21 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api()]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Media Status
pub enum MediaStatus {
    /// Media is ready to be written
    Writable,
    /// Media is full (contains data)
    Full,
    /// Media is marked as unknown, needs rescan
    Unknown,
    /// Media is marked as damaged
    Damaged,
    /// Media is marked as retired
    Retired,
}
92
pbs-api-types/src/tape/mod.rs
Normal file
@ -0,0 +1,92 @@
//! Types for tape backup API

mod device;
pub use device::*;

mod changer;
pub use changer::*;

mod drive;
pub use drive::*;

mod media_pool;
pub use media_pool::*;

mod media_status;
pub use media_status::*;

mod media_location;

pub use media_location::*;

mod media;
pub use media::*;

use const_format::concatcp;
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
use proxmox_uuid::Uuid;

use crate::{
    BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
};

const_regex! {
    pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE, ")?", SNAPSHOT_PATH_REGEX_STR, r"$");
}

pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);

pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
    StringSchema::new("Tape encryption key fingerprint (sha256).")
        .format(&FINGERPRINT_SHA256_FORMAT)
        .schema();

pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
    StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time'")
        .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
        .type_text("store:[ns/namespace/...]type/id/time")
        .schema();
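A restore target therefore names the datastore, an optional namespace path, and the snapshot path. A sketch of strings this format accepts, checked through the schema itself (assuming `pbs_api_types` as a dependency; the datastore, namespace, and snapshot values are illustrative):

```rust
use pbs_api_types::TAPE_RESTORE_SNAPSHOT_SCHEMA;

fn main() {
    // Illustrative values: datastore "store2", optional namespace
    // "customer-a", then the usual <type>/<id>/<time> snapshot path.
    let plain = "store2:vm/100/2024-01-01T10:00:00Z";
    let namespaced = "store2:ns/customer-a/vm/100/2024-01-01T10:00:00Z";
    assert!(TAPE_RESTORE_SNAPSHOT_SCHEMA.parse_simple_value(plain).is_ok());
    assert!(TAPE_RESTORE_SNAPSHOT_SCHEMA
        .parse_simple_value(namespaced)
        .is_ok());
}
```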
#[api(
    properties: {
        pool: {
            schema: MEDIA_POOL_NAME_SCHEMA,
            optional: true,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
        "media": {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
        "media-set": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
        "backup-type": {
            type: BackupType,
            optional: true,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Content list filter parameters
pub struct MediaContentListFilter {
    pub pool: Option<String>,
    pub label_text: Option<String>,
    pub media: Option<Uuid>,
    pub media_set: Option<Uuid>,
    pub backup_type: Option<BackupType>,
    pub backup_id: Option<String>,
}
168
pbs-api-types/src/traffic_control.rs
Normal file
@ -0,0 +1,168 @@
use serde::{Deserialize, Serialize};

use proxmox_human_byte::HumanByte;
use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater};

use crate::{
    CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};

pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
    StringSchema::new("Timeframe to specify when the rule is active.")
        .format(&DAILY_DURATION_FORMAT)
        .schema();

pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[api(
    properties: {
        "rate-in": {
            type: HumanByte,
            optional: true,
        },
        "burst-in": {
            type: HumanByte,
            optional: true,
        },
        "rate-out": {
            type: HumanByte,
            optional: true,
        },
        "burst-out": {
            type: HumanByte,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration
pub struct RateLimitConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rate_in: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub burst_in: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rate_out: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub burst_out: Option<HumanByte>,
}

impl RateLimitConfig {
    pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
        Self {
            rate_in: rate,
            burst_in: burst,
            rate_out: rate,
            burst_out: burst,
        }
    }

    /// Create a [RateLimitConfig] from a [ClientRateLimitConfig]
    pub fn from_client_config(limit: ClientRateLimitConfig) -> Self {
        Self::with_same_inout(limit.rate, limit.burst)
    }
}
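`with_same_inout` is the bridge from the client's single rate/burst pair to the directional server-side config. A minimal sketch (assuming `pbs_api_types`, `proxmox_human_byte` and `anyhow` as dependencies; the human-readable byte strings are illustrative):

```rust
use pbs_api_types::RateLimitConfig;
use proxmox_human_byte::HumanByte;

fn main() -> Result<(), anyhow::Error> {
    // Client options only expose a single rate/burst pair ...
    let rate: HumanByte = "100 MiB".parse()?;
    let burst: HumanByte = "200 MiB".parse()?;

    // ... which is applied symmetrically to both traffic directions.
    let limit = RateLimitConfig::with_same_inout(Some(rate), Some(burst));
    assert_eq!(limit.rate_in, limit.rate_out);
    assert_eq!(limit.burst_in, limit.burst_out);
    Ok(())
}
```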
const CLIENT_RATE_LIMIT_SCHEMA: Schema = HumanByte::API_SCHEMA
    .unwrap_string_schema_cloned()
    .description("Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, GiB, ...).")
    .schema();

const CLIENT_BURST_SCHEMA: Schema = HumanByte::API_SCHEMA
    .unwrap_string_schema_cloned()
    .description("Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, GiB, ...).")
    .schema();

#[api(
    properties: {
        rate: {
            schema: CLIENT_RATE_LIMIT_SCHEMA,
            optional: true,
        },
        burst: {
            schema: CLIENT_BURST_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default, Clone)]
#[serde(rename_all = "kebab-case")]
/// Client Rate Limit Configuration
pub struct ClientRateLimitConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    rate: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    burst: Option<HumanByte>,
}

#[api(
    properties: {
        name: {
            schema: TRAFFIC_CONTROL_ID_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        limit: {
            type: RateLimitConfig,
        },
        network: {
            type: Array,
            items: {
                schema: CIDR_SCHEMA,
            },
        },
        timeframe: {
            type: Array,
            items: {
                schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
            },
            optional: true,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule
pub struct TrafficControlRule {
    #[updater(skip)]
    pub name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Rule applies to source IPs within these networks
    pub network: Vec<String>,
    #[serde(flatten)]
    pub limit: RateLimitConfig,
    // fixme: expose this?
    // /// Bandwidth is shared across all connections
    // #[serde(skip_serializing_if="Option::is_none")]
    // pub shared: Option<bool>,
    /// Enable the rule at specific times
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeframe: Option<Vec<String>>,
}

#[api(
    properties: {
        config: {
            type: TrafficControlRule,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule config with current rates
pub struct TrafficControlCurrentRate {
    #[serde(flatten)]
    pub config: TrafficControlRule,
    /// Current ingress rate in bytes/second
    pub cur_rate_in: u64,
    /// Current egress rate in bytes/second
    pub cur_rate_out: u64,
}
226
pbs-api-types/src/user.rs
Normal file
@ -0,0 +1,226 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};

use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};

pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
    "Enable the account (default). You can set this to '0' to disable the account.",
)
.default(true)
.schema();

pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
    "Account expiration date (seconds since epoch). '0' means no expiration date.",
)
.default(0)
.minimum(0)
.schema();

pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

#[api(
    properties: {
        userid: {
            type: Userid,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
        firstname: {
            optional: true,
            schema: FIRST_NAME_SCHEMA,
        },
        lastname: {
            schema: LAST_NAME_SCHEMA,
            optional: true,
        },
        email: {
            schema: EMAIL_SCHEMA,
            optional: true,
        },
        tokens: {
            type: Array,
            optional: true,
            description: "List of user's API tokens.",
            items: {
                type: ApiToken
            },
        },
        "totp-locked": {
            type: bool,
            optional: true,
            default: false,
            description: "True if the user is currently locked out of TOTP factors",
        },
        "tfa-locked-until": {
            optional: true,
            description: "Contains a timestamp until when a user is locked out of 2nd factors",
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
    pub userid: Userid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub firstname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lastname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub tokens: Vec<ApiToken>,
    #[serde(skip_serializing_if = "bool_is_false", default)]
    pub totp_locked: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tfa_locked_until: Option<i64>,
}

fn bool_is_false(b: &bool) -> bool {
    !b
}

#[api(
    properties: {
        tokenid: {
            schema: PROXMOX_TOKEN_ID_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ApiToken properties.
pub struct ApiToken {
    pub tokenid: Authid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
}

impl ApiToken {
    pub fn is_active(&self) -> bool {
        if !self.enable.unwrap_or(true) {
            return false;
        }
        if let Some(expire) = self.expire {
            let now = proxmox_time::epoch_i64();
            if expire > 0 && expire <= now {
                return false;
            }
        }
        true
    }
}
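`is_active` treats a missing `enable` as enabled and `expire == 0` as "never expires". A minimal sketch (assuming `pbs_api_types` and `anyhow` as dependencies; the token id is a made-up example):

```rust
use pbs_api_types::ApiToken;

fn main() -> Result<(), anyhow::Error> {
    let mut token = ApiToken {
        // Illustrative token id; Authid parses the usual
        // "user@realm!token" form.
        tokenid: "root@pam!example".parse()?,
        comment: None,
        enable: None,    // unset counts as enabled
        expire: Some(0), // 0 means "no expiration"
    };
    assert!(token.is_active());

    // Any positive epoch in the past disables the token.
    token.expire = Some(1);
    assert!(!token.is_active());
    Ok(())
}
```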
#[api(
    properties: {
        userid: {
            type: Userid,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
        firstname: {
            optional: true,
            schema: FIRST_NAME_SCHEMA,
        },
        lastname: {
            schema: LAST_NAME_SCHEMA,
            optional: true,
        },
        email: {
            schema: EMAIL_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)]
/// User properties.
pub struct User {
    #[updater(skip)]
    pub userid: Userid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub firstname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lastname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
}

impl User {
    pub fn is_active(&self) -> bool {
        if !self.enable.unwrap_or(true) {
            return false;
        }
        if let Some(expire) = self.expire {
            let now = proxmox_time::epoch_i64();
            if expire > 0 && expire <= now {
                return false;
            }
        }
        true
    }
}
190
pbs-api-types/src/version.rs
Normal file
@ -0,0 +1,190 @@
//! Defines the types for the api version info endpoint
use std::cmp::Ordering;
use std::convert::TryFrom;

use anyhow::{format_err, Context};

use proxmox_schema::api;

#[api(
    description: "Api version information",
    properties: {
        "version": {
            description: "Version 'major.minor'",
            type: String,
        },
        "release": {
            description: "Version release",
            type: String,
        },
        "repoid": {
            description: "Version repository id",
            type: String,
        },
    }
)]
#[derive(serde::Deserialize, serde::Serialize)]
pub struct ApiVersionInfo {
    pub version: String,
    pub release: String,
    pub repoid: String,
}

pub type ApiVersionMajor = u64;
pub type ApiVersionMinor = u64;
pub type ApiVersionRelease = u64;

#[derive(PartialEq, Eq)]
pub struct ApiVersion {
    pub major: ApiVersionMajor,
    pub minor: ApiVersionMinor,
    pub release: ApiVersionRelease,
}

impl TryFrom<ApiVersionInfo> for ApiVersion {
    type Error = anyhow::Error;

    fn try_from(value: ApiVersionInfo) -> Result<Self, Self::Error> {
        let (major, minor) = value
            .version
            .split_once('.')
            .ok_or_else(|| format_err!("malformed API version {}", value.version))?;

        let major: ApiVersionMajor = major
            .parse()
            .with_context(|| "failed to parse major version")?;
        let minor: ApiVersionMinor = minor
            .parse()
            .with_context(|| "failed to parse minor version")?;
        let release: ApiVersionRelease = value
            .release
            .parse()
            .with_context(|| "failed to parse release version")?;

        Ok(Self {
            major,
            minor,
            release,
        })
    }
}
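A minimal sketch of the conversion (assuming `pbs_api_types` and `anyhow` as dependencies; the version strings are illustrative, and `ApiVersion::new` is the constructor defined just below):

```rust
use pbs_api_types::{ApiVersion, ApiVersionInfo};

fn main() -> Result<(), anyhow::Error> {
    // Illustrative response payload: "version" carries "major.minor",
    // "release" is parsed separately.
    let info = ApiVersionInfo {
        version: "3.2".to_string(),
        release: "5".to_string(),
        repoid: "abcdef12".to_string(),
    };
    let version = ApiVersion::try_from(info)?;
    assert_eq!((version.major, version.minor, version.release), (3, 2, 5));

    // Comparisons are lexicographic over (major, minor, release).
    assert!(version < ApiVersion::new(3, 3, 0));
    Ok(())
}
```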
impl PartialOrd for ApiVersion {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let ordering = match (
            self.major.cmp(&other.major),
            self.minor.cmp(&other.minor),
            self.release.cmp(&other.release),
        ) {
            (Ordering::Equal, Ordering::Equal, ordering) => ordering,
            (Ordering::Equal, ordering, _) => ordering,
            (ordering, _, _) => ordering,
        };

        Some(ordering)
    }
}

impl ApiVersion {
    pub fn new(major: ApiVersionMajor, minor: ApiVersionMinor, release: ApiVersionRelease) -> Self {
        Self {
            major,
            minor,
            release,
        }
    }
}

#[test]
fn same_level_version_comparison() {
    let major_base = ApiVersion::new(2, 0, 0);
    let major_less = ApiVersion::new(1, 0, 0);
    let major_greater = ApiVersion::new(3, 0, 0);

    let minor_base = ApiVersion::new(2, 2, 0);
    let minor_less = ApiVersion::new(2, 1, 0);
    let minor_greater = ApiVersion::new(2, 3, 0);

    let release_base = ApiVersion::new(2, 2, 2);
    let release_less = ApiVersion::new(2, 2, 1);
    let release_greater = ApiVersion::new(2, 2, 3);

    assert!(major_base == major_base);
    assert!(minor_base == minor_base);
    assert!(release_base == release_base);

    assert!(major_base > major_less);
    assert!(major_base >= major_less);
    assert!(major_base != major_less);

    assert!(major_base < major_greater);
    assert!(major_base <= major_greater);
    assert!(major_base != major_greater);

    assert!(minor_base > minor_less);
    assert!(minor_base >= minor_less);
    assert!(minor_base != minor_less);

    assert!(minor_base < minor_greater);
    assert!(minor_base <= minor_greater);
    assert!(minor_base != minor_greater);

    assert!(release_base > release_less);
    assert!(release_base >= release_less);
    assert!(release_base != release_less);

    assert!(release_base < release_greater);
    assert!(release_base <= release_greater);
    assert!(release_base != release_greater);
}

#[test]
fn mixed_level_version_comparison() {
    let major_base = ApiVersion::new(2, 0, 0);
    let major_less = ApiVersion::new(1, 0, 0);
    let major_greater = ApiVersion::new(3, 0, 0);

    let minor_base = ApiVersion::new(2, 2, 0);
    let minor_less = ApiVersion::new(2, 1, 0);
    let minor_greater = ApiVersion::new(2, 3, 0);

    let release_base = ApiVersion::new(2, 2, 2);
    let release_less = ApiVersion::new(2, 2, 1);
    let release_greater = ApiVersion::new(2, 2, 3);

    assert!(major_base < minor_base);
    assert!(major_base < minor_less);
    assert!(major_base < minor_greater);

    assert!(major_base < release_base);
    assert!(major_base < release_less);
    assert!(major_base < release_greater);

    assert!(major_less < minor_base);
    assert!(major_less < minor_less);
    assert!(major_less < minor_greater);

    assert!(major_less < release_base);
    assert!(major_less < release_less);
    assert!(major_less < release_greater);

    assert!(major_greater > minor_base);
    assert!(major_greater > minor_less);
    assert!(major_greater > minor_greater);

    assert!(major_greater > release_base);
    assert!(major_greater > release_less);
    assert!(major_greater > release_greater);

    assert!(minor_base < release_base);
    assert!(minor_base < release_less);
    assert!(minor_base < release_greater);

    assert!(minor_greater > release_base);
    assert!(minor_greater > release_less);
    assert!(minor_greater > release_greater);

    assert!(minor_less < release_base);
    assert!(minor_less < release_less);
    assert!(minor_less < release_greater);
}
78
pbs-api-types/src/zfs.rs
Normal file
@ -0,0 +1,78 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::*;

const_regex! {
    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}

pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
    .minimum(9)
    .maximum(16)
    .default(12)
    .schema();

pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
    .schema();

#[api(default: "On")]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
    /// Gnu Zip
    Gzip,
    /// LZ4
    Lz4,
    /// LZJB
    Lzjb,
    /// ZLE
    Zle,
    /// ZStd
    ZStd,
    /// Enable compression using the default algorithm.
    On,
    /// Disable compression.
    Off,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
    /// Single Disk
    Single,
    /// Mirror
    Mirror,
    /// Raid10
    Raid10,
    /// RaidZ
    RaidZ,
    /// RaidZ2
    RaidZ2,
    /// RaidZ3
    RaidZ3,
}

#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
    /// zpool name
    pub name: String,
    /// Health
    pub health: String,
    /// Total size
    pub size: u64,
    /// Used size
    pub alloc: u64,
    /// Free space
    pub free: u64,
    /// ZFS fragmentation level
    pub frag: u64,
    /// ZFS deduplication ratio
    pub dedup: f64,
}
76
pbs-api-types/tests/group_filter_tests.rs
Normal file
@ -0,0 +1,76 @@
use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
use std::str::FromStr;

#[test]
fn test_no_filters() {
    let group_filters = vec![];

    let do_backup = [
        "vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
    ];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}

#[test]
fn test_include_filters() {
    let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];

    let do_backup = [
        "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
    ];

    let dont_backup = ["vm/101", "vm/109"];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }

    for id in dont_backup {
        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}

#[test]
fn test_exclude_filters() {
    let group_filters = [
        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
    ];

    let do_backup = ["vm/104", "vm/108", "vm/109"];

    let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
    for id in dont_backup {
        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}

#[test]
fn test_include_and_exclude_filters() {
    let group_filters = [
        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
        GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
    ];

    let do_backup = ["vm/104", "vm/108"];

    let dont_backup = [
        "vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
    ];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }

    for id in dont_backup {
        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}