From 8c74349b08f49a61d8b18c0b6f07a7e90be4db45 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Thu, 21 Apr 2022 15:04:59 +0200 Subject: [PATCH] api-types: add namespace to BackupGroup Make it easier by adding an helper accepting either group or directory Signed-off-by: Wolfgang Bumiller Signed-off-by: Thomas Lamprecht --- examples/download-speed.rs | 12 +- examples/upload-speed.rs | 12 +- pbs-api-types/src/datastore.rs | 125 +++++++++--- pbs-api-types/src/lib.rs | 20 +- pbs-client/src/backup_reader.rs | 13 +- pbs-client/src/backup_writer.rs | 13 +- pbs-client/src/tools/mod.rs | 1 + pbs-datastore/examples/ls-snapshots.rs | 2 +- pbs-datastore/src/backup_info.rs | 13 +- pbs-datastore/src/datastore.rs | 57 ++++-- proxmox-backup-client/src/benchmark.rs | 12 +- proxmox-backup-client/src/catalog.rs | 30 +-- proxmox-backup-client/src/main.rs | 75 +++++--- proxmox-backup-client/src/mount.rs | 17 +- proxmox-file-restore/src/main.rs | 24 +-- src/api2/admin/datastore.rs | 251 +++++++++++++------------ src/api2/backup/mod.rs | 15 +- src/api2/reader/mod.rs | 21 +-- src/api2/tape/backup.rs | 3 +- src/backup/verify.rs | 6 +- src/server/prune_job.rs | 3 +- src/server/pull.rs | 25 +-- 22 files changed, 431 insertions(+), 319 deletions(-) diff --git a/examples/download-speed.rs b/examples/download-speed.rs index dbd778af..d17d5d45 100644 --- a/examples/download-speed.rs +++ b/examples/download-speed.rs @@ -2,7 +2,7 @@ use std::io::Write; use anyhow::Error; -use pbs_api_types::{Authid, BackupType}; +use pbs_api_types::{Authid, BackupNamespace, BackupType}; use pbs_client::{BackupReader, HttpClient, HttpClientOptions}; pub struct DummyWriter { @@ -37,9 +37,13 @@ async fn run() -> Result<(), Error> { client, None, "store2", - BackupType::Host, - "elsa", - backup_time, + &( + BackupNamespace::root(), + BackupType::Host, + "elsa".to_string(), + backup_time, + ) + .into(), true, ) .await?; diff --git a/examples/upload-speed.rs b/examples/upload-speed.rs index bfd01799..26385816 100644 --- a/examples/upload-speed.rs +++ b/examples/upload-speed.rs @@ -1,6 +1,6 @@ use anyhow::Error; -use pbs_api_types::{Authid, BackupType}; +use pbs_api_types::{Authid, BackupNamespace, BackupType}; use pbs_client::{BackupWriter, HttpClient, HttpClientOptions}; async fn upload_speed() -> Result { @@ -21,9 +21,13 @@ async fn upload_speed() -> Result { client, None, datastore, - BackupType::Host, - "speedtest", - backup_time, + &( + BackupNamespace::root(), + BackupType::Host, + "speedtest".to_string(), + backup_time, + ) + .into(), false, true, ) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 8c7ebad0..b2ef001b 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; @@ -16,19 +16,24 @@ use crate::{ }; const_regex! 
{ + pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$"); + pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$"); pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$"); pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); - pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$"); + pub GROUP_PATH_REGEX = concat!( + r"^(", BACKUP_NS_PATH_RE!(), r")?", + r"(", BACKUP_TYPE_RE!(), ")/", + r"(", BACKUP_ID_RE!(), r")$", + ); pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$"; pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$"); - - pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$"); + pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$"); pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!()); } @@ -640,7 +645,7 @@ impl BackupNamespace { /// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every /// component. - fn display_as_path(&self) -> BackupNamespacePath { + pub fn display_as_path(&self) -> BackupNamespacePath { BackupNamespacePath(self) } @@ -775,6 +780,7 @@ impl std::cmp::PartialOrd for BackupType { #[api( properties: { + "backup-ns": { type: BackupNamespace }, "backup-type": { type: BackupType }, "backup-id": { schema: BACKUP_ID_SCHEMA }, }, @@ -783,6 +789,14 @@ impl std::cmp::PartialOrd for BackupType { #[serde(rename_all = "kebab-case")] /// A backup group (without a data store). pub struct BackupGroup { + /// An optional namespace this backup belongs to. + #[serde( + rename = "backup-ns", + skip_serializing_if = "BackupNamespace::is_root", + default + )] + pub ns: BackupNamespace, + /// Backup type. #[serde(rename = "backup-type")] pub ty: BackupType, @@ -793,8 +807,12 @@ pub struct BackupGroup { } impl BackupGroup { - pub fn new>(ty: BackupType, id: T) -> Self { - Self { ty, id: id.into() } + pub fn new>(ns: BackupNamespace, ty: BackupType, id: T) -> Self { + Self { + ns, + ty, + id: id.into(), + } } pub fn matches(&self, filter: &crate::GroupFilter) -> bool { @@ -820,21 +838,29 @@ impl AsRef for BackupGroup { } } -impl From<(BackupType, String)> for BackupGroup { - fn from(data: (BackupType, String)) -> Self { +impl From<(BackupNamespace, BackupType, String)> for BackupGroup { + #[inline] + fn from(data: (BackupNamespace, BackupType, String)) -> Self { Self { - ty: data.0, - id: data.1, + ns: data.0, + ty: data.1, + id: data.2, } } } impl std::cmp::Ord for BackupGroup { fn cmp(&self, other: &Self) -> std::cmp::Ordering { + let ns_order = self.ns.cmp(&other.ns); + if ns_order != std::cmp::Ordering::Equal { + return ns_order; + } + let type_order = self.ty.cmp(&other.ty); if type_order != std::cmp::Ordering::Equal { return type_order; } + // try to compare IDs numerically let id_self = self.id.parse::(); let id_other = other.id.parse::(); @@ -855,7 +881,11 @@ impl std::cmp::PartialOrd for BackupGroup { impl fmt::Display for BackupGroup { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.ty, self.id) + if self.ns.is_root() { + write!(f, "{}/{}", self.ty, self.id) + } else { + write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id) + } } } @@ -871,8 +901,9 @@ impl std::str::FromStr for BackupGroup { .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?; Ok(Self { - ty: cap.get(1).unwrap().as_str().parse()?, - id: cap.get(2).unwrap().as_str().to_owned(), + ns: 
BackupNamespace::from_path(cap.get(1).unwrap().as_str())?, + ty: cap.get(2).unwrap().as_str().parse()?, + id: cap.get(3).unwrap().as_str().to_owned(), }) } } @@ -921,32 +952,44 @@ impl From<(BackupGroup, i64)> for BackupDir { } } -impl From<(BackupType, String, i64)> for BackupDir { - fn from(data: (BackupType, String, i64)) -> Self { +impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir { + fn from(data: (BackupNamespace, BackupType, String, i64)) -> Self { Self { - group: (data.0, data.1).into(), - time: data.2, + group: (data.0, data.1, data.2).into(), + time: data.3, } } } impl BackupDir { - pub fn with_rfc3339(ty: BackupType, id: T, backup_time_string: &str) -> Result + pub fn with_rfc3339( + ns: BackupNamespace, + ty: BackupType, + id: T, + backup_time_string: &str, + ) -> Result where T: Into, { let time = proxmox_time::parse_rfc3339(&backup_time_string)?; - let group = BackupGroup::new(ty, id.into()); + let group = BackupGroup::new(ns, ty, id.into()); Ok(Self { group, time }) } + #[inline] pub fn ty(&self) -> BackupType { self.group.ty } + #[inline] pub fn id(&self) -> &str { &self.group.id } + + #[inline] + pub fn ns(&self) -> &BackupNamespace { + &self.group.ns + } } impl std::str::FromStr for BackupDir { @@ -960,22 +1003,56 @@ impl std::str::FromStr for BackupDir { .captures(path) .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; + let ns = match cap.get(1) { + Some(cap) => BackupNamespace::from_path(cap.as_str())?, + None => BackupNamespace::root(), + }; BackupDir::with_rfc3339( - cap.get(1).unwrap().as_str().parse()?, - cap.get(2).unwrap().as_str(), + ns, + cap.get(2).unwrap().as_str().parse()?, cap.get(3).unwrap().as_str(), + cap.get(4).unwrap().as_str(), ) } } -impl std::fmt::Display for BackupDir { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for BackupDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME: log error? let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?; write!(f, "{}/{}", self.group, time) } } +/// Used when both a backup group or a directory can be valid. +pub enum BackupPart { + Group(BackupGroup), + Dir(BackupDir), +} + +impl std::str::FromStr for BackupPart { + type Err = Error; + + /// Parse a path which can be either a backup group or a snapshot dir. + fn from_str(path: &str) -> Result { + let cap = GROUP_OR_SNAPSHOT_PATH_REGEX + .captures(path) + .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; + + let ns = match cap.get(1) { + Some(cap) => BackupNamespace::from_path(cap.as_str())?, + None => BackupNamespace::root(), + }; + let ty = cap.get(2).unwrap().as_str().parse()?; + let id = cap.get(3).unwrap().as_str().to_string(); + + Ok(match cap.get(4) { + Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?), + None => BackupPart::Group((ns, ty, id).into()), + }) + } +} + #[api( properties: { "backup": { type: BackupDir }, diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 459a01f5..4f40a27f 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -34,14 +34,32 @@ macro_rules! BACKUP_NS_RE { ); } +#[rustfmt::skip] +#[macro_export] +macro_rules! BACKUP_NS_PATH_RE { + () => ( + concat!(r"(:?ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!()) + ); +} + #[rustfmt::skip] #[macro_export] macro_rules! 
SNAPSHOT_PATH_REGEX_STR { () => ( - concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")") + concat!( + r"(", BACKUP_NS_PATH_RE!(), ")?", + r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")", + ) ); } +#[macro_export] +macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR { + () => { + concat!(SNAPSHOT_PATH_REGEX_STR!(), "?") + }; +} + mod acl; pub use acl::*; diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs index 99195492..fb3df2a9 100644 --- a/pbs-client/src/backup_reader.rs +++ b/pbs-client/src/backup_reader.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use futures::future::AbortHandle; use serde_json::{json, Value}; -use pbs_api_types::BackupType; +use pbs_api_types::BackupDir; use pbs_datastore::data_blob::DataBlob; use pbs_datastore::data_blob_reader::DataBlobReader; use pbs_datastore::dynamic_index::DynamicIndexReader; @@ -47,15 +47,14 @@ impl BackupReader { client: HttpClient, crypt_config: Option>, datastore: &str, - backup_type: BackupType, - backup_id: &str, - backup_time: i64, + backup: &BackupDir, debug: bool, ) -> Result, Error> { let param = json!({ - "backup-type": backup_type, - "backup-id": backup_id, - "backup-time": backup_time, + "backup-ns": backup.ns(), + "backup-type": backup.ty(), + "backup-id": backup.id(), + "backup-time": backup.time, "store": datastore, "debug": debug, }); diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index 17f7bdad..60b21a80 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use pbs_api_types::{BackupType, HumanByte}; +use pbs_api_types::{BackupDir, HumanByte}; use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder}; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; @@ -86,16 +86,15 @@ impl BackupWriter { client: HttpClient, crypt_config: Option>, datastore: &str, - backup_type: BackupType, - backup_id: &str, - backup_time: i64, + backup: &BackupDir, debug: bool, benchmark: bool, ) -> Result, Error> { let param = json!({ - "backup-type": backup_type, - "backup-id": backup_id, - "backup-time": backup_time, + "backup-ns": backup.ns(), + "backup-type": backup.ty(), + "backup-id": backup.id(), + "backup-time": backup.time, "store": datastore, "debug": debug, "benchmark": benchmark diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs index 60239ee4..afe74849 100644 --- a/pbs-client/src/tools/mod.rs +++ b/pbs-client/src/tools/mod.rs @@ -293,6 +293,7 @@ pub async fn complete_server_file_name_do(param: &HashMap) -> Ve }; let query = json_object_to_query(json!({ + "backup-ns": snapshot.group.ns, "backup-type": snapshot.group.ty, "backup-id": snapshot.group.id, "backup-time": snapshot.time, diff --git a/pbs-datastore/examples/ls-snapshots.rs b/pbs-datastore/examples/ls-snapshots.rs index d87d4484..7b4445b2 100644 --- a/pbs-datastore/examples/ls-snapshots.rs +++ b/pbs-datastore/examples/ls-snapshots.rs @@ -12,7 +12,7 @@ fn run() -> Result<(), Error> { let store = unsafe { DataStore::open_path("", &base, None)? }; - for group in store.iter_backup_groups()? { + for group in store.iter_backup_groups(Default::default())? 
{ let group = group?; println!("found group {}", group); diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 30275b22..94ff1717 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -217,11 +217,10 @@ impl From for pbs_api_types::BackupGroup { } } -impl std::fmt::Display for BackupGroup { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let backup_type = self.backup_type(); - let id = self.backup_id(); - write!(f, "{}/{}", backup_type, id) +impl fmt::Display for BackupGroup { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.group, f) } } @@ -446,8 +445,8 @@ impl From for pbs_api_types::BackupDir { } } -impl std::fmt::Display for BackupDir { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for BackupDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}/{}", self.dir.group, self.backup_time_string) } } diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 6df23c52..8a9f16b8 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -17,8 +17,8 @@ use proxmox_sys::WorkerTaskContext; use proxmox_sys::{task_log, task_warn}; use pbs_api_types::{ - Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus, - HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID, + Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, + GarbageCollectionStatus, HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID, }; use pbs_config::ConfigVersionCache; @@ -348,6 +348,16 @@ impl DataStore { self.inner.chunk_store.base_path() } + pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf { + let mut path = self.base_path(); + path.reserve(ns.path_len()); + for part in ns.components() { + path.push("ns"); + path.push(part); + } + path + } + /// Cleanup a backup directory /// /// Removes all files not mentioned in the manifest. @@ -517,6 +527,10 @@ impl DataStore { ) -> Result<(Authid, DirLockGuard), Error> { // create intermediate path first: let mut full_path = self.base_path(); + for ns in backup_group.ns.components() { + full_path.push("ns"); + full_path.push(ns); + } full_path.push(backup_group.ty.as_str()); std::fs::create_dir_all(&full_path)?; @@ -579,8 +593,11 @@ impl DataStore { /// /// The iterated item is still a Result that can contain errors from rather unexptected FS or /// parsing errors. - pub fn iter_backup_groups(self: &Arc) -> Result { - ListGroups::new(Arc::clone(self)) + pub fn iter_backup_groups( + self: &Arc, + ns: BackupNamespace, + ) -> Result { + ListGroups::new(Arc::clone(self), ns) } /// Get a streaming iter over top-level backup groups of a datatstore, filtered by Ok results @@ -589,10 +606,11 @@ impl DataStore { /// logged. Can be useful in iterator chain commands pub fn iter_backup_groups_ok( self: &Arc, + ns: BackupNamespace, ) -> Result + 'static, Error> { let this = Arc::clone(self); Ok( - ListGroups::new(Arc::clone(&self))?.filter_map(move |group| match group { + ListGroups::new(Arc::clone(&self), ns)?.filter_map(move |group| match group { Ok(group) => Some(group), Err(err) => { log::error!("list groups error on datastore {} - {}", this.name(), err); @@ -605,8 +623,11 @@ impl DataStore { /// Get a in-memory vector for all top-level backup groups of a datatstore /// /// NOTE: using the iterator directly is most often more efficient w.r.t. 
memory usage - pub fn list_backup_groups(self: &Arc) -> Result, Error> { - ListGroups::new(Arc::clone(self))?.collect() + pub fn list_backup_groups( + self: &Arc, + ns: BackupNamespace, + ) -> Result, Error> { + ListGroups::new(Arc::clone(self), ns)?.collect() } pub fn list_images(&self) -> Result, Error> { @@ -1047,11 +1068,16 @@ impl DataStore { } /// Open a backup group from this datastore. - pub fn backup_group_from_parts(self: &Arc, ty: BackupType, id: T) -> BackupGroup + pub fn backup_group_from_parts( + self: &Arc, + ns: BackupNamespace, + ty: BackupType, + id: T, + ) -> BackupGroup where T: Into, { - self.backup_group((ty, id.into()).into()) + self.backup_group((ns, ty, id.into()).into()) } /// Open a backup group from this datastore by backup group path such as `vm/100`. @@ -1069,6 +1095,7 @@ impl DataStore { /// Open a snapshot (backup directory) from this datastore. pub fn backup_dir_from_parts( self: &Arc, + ns: BackupNamespace, ty: BackupType, id: T, time: i64, @@ -1076,7 +1103,7 @@ impl DataStore { where T: Into, { - self.backup_dir((ty, id.into(), time).into()) + self.backup_dir((ns, ty, id.into(), time).into()) } /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string. @@ -1143,15 +1170,19 @@ impl Iterator for ListSnapshots { /// A iterator for a (single) level of Backup Groups pub struct ListGroups { store: Arc, + ns: BackupNamespace, type_fd: proxmox_sys::fs::ReadDir, id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>, } impl ListGroups { - pub fn new(store: Arc) -> Result { + pub fn new(store: Arc, ns: BackupNamespace) -> Result { + let mut base_path = store.base_path().to_owned(); + base_path.push(ns.path()); Ok(ListGroups { - type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.base_path())?, + type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?, store, + ns, id_state: None, }) } @@ -1183,7 +1214,7 @@ impl Iterator for ListGroups { if BACKUP_ID_REGEX.is_match(name) { return Some(Ok(BackupGroup::new( Arc::clone(&self.store), - (group_type, name.to_owned()).into(), + (self.ns.clone(), group_type, name.to_owned()).into(), ))); } } diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs index bc853b54..f03d0d87 100644 --- a/proxmox-backup-client/src/benchmark.rs +++ b/proxmox-backup-client/src/benchmark.rs @@ -14,7 +14,7 @@ use proxmox_router::{ }; use proxmox_schema::{api, ApiType, ReturnType}; -use pbs_api_types::BackupType; +use pbs_api_types::{BackupNamespace, BackupType}; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupRepository, BackupWriter}; use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig}; @@ -242,9 +242,13 @@ async fn test_upload_speed( client, crypt_config.clone(), repo.store(), - BackupType::Host, - "benchmark", - backup_time, + &( + BackupNamespace::root(), + BackupType::Host, + "benchmark".to_string(), + backup_time, + ) + .into(), false, true, ) diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs index 46bc7223..fc4e731b 100644 --- a/proxmox-backup-client/src/catalog.rs +++ b/proxmox-backup-client/src/catalog.rs @@ -14,9 +14,9 @@ use pbs_tools::crypt_config::CryptConfig; use pbs_tools::json::required_string_param; use crate::{ - api_datastore_latest_snapshot, complete_backup_snapshot, complete_group_or_snapshot, - complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key, - extract_repository_from_value, format_key_source, 
record_repository, BackupDir, BackupGroup, + complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name, + complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group, + extract_repository_from_value, format_key_source, record_repository, BackupDir, BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA, }; @@ -68,16 +68,8 @@ async fn dump_catalog(param: Value) -> Result { let client = connect(&repo)?; - let client = BackupReader::start( - client, - crypt_config.clone(), - repo.store(), - snapshot.group.ty, - &snapshot.group.id, - snapshot.time, - true, - ) - .await?; + let client = + BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?; let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; @@ -153,13 +145,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { let path = required_string_param(¶m, "snapshot")?; let archive_name = required_string_param(¶m, "archive-name")?; - let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 { - let group: BackupGroup = path.parse()?; - api_datastore_latest_snapshot(&client, repo.store(), group).await? - } else { - let snapshot: BackupDir = path.parse()?; - (snapshot.group.ty, snapshot.group.id, snapshot.time) - }; + let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?; let crypto = crypto_parameters(¶m)?; @@ -186,9 +172,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { client, crypt_config.clone(), repo.store(), - backup_type, - &backup_id, - backup_time, + &backup_dir, true, ) .await?; diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index d7066b36..5b5a7915 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -7,6 +7,7 @@ use std::task::Context; use anyhow::{bail, format_err, Error}; use futures::stream::{StreamExt, TryStreamExt}; +use serde::Deserialize; use serde_json::{json, Value}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -22,10 +23,10 @@ use proxmox_time::{epoch_i64, strftime_local}; use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ - Authid, BackupDir, BackupGroup, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte, - PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus, - BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, - TRAFFIC_CONTROL_RATE_SCHEMA, + Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode, + Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions, RateLimitConfig, + SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, + BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA, }; use pbs_client::catalog_shell::Shell; use pbs_client::tools::{ @@ -148,7 +149,7 @@ pub async fn api_datastore_latest_snapshot( client: &HttpClient, store: &str, group: BackupGroup, -) -> Result<(BackupType, String, i64), Error> { +) -> Result { let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?; let mut list: Vec = serde_json::from_value(list)?; @@ -158,7 +159,20 @@ pub async fn api_datastore_latest_snapshot( list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time)); - Ok((group.ty, group.id, 
list[0].backup.time)) + Ok((group, list[0].backup.time).into()) +} + +pub async fn dir_or_last_from_group( + client: &HttpClient, + repo: &BackupRepository, + path: &str, +) -> Result { + match path.parse::()? { + BackupPart::Dir(dir) => Ok(dir), + BackupPart::Group(group) => { + api_datastore_latest_snapshot(&client, repo.store(), group).await + } + } } async fn backup_directory>( @@ -251,13 +265,12 @@ async fn list_backup_groups(param: Value) -> Result { record_repository(&repo); let render_group_path = |_v: &Value, record: &Value| -> Result { - let item: GroupListItem = serde_json::from_value(record.to_owned())?; - let group = BackupGroup::new(item.backup.ty, item.backup.id); - Ok(group.to_string()) + let item = GroupListItem::deserialize(record)?; + Ok(item.backup.to_string()) }; let render_last_backup = |_v: &Value, record: &Value| -> Result { - let item: GroupListItem = serde_json::from_value(record.to_owned())?; + let item = GroupListItem::deserialize(record)?; let snapshot = BackupDir { group: item.backup, time: item.last_backup, @@ -266,7 +279,7 @@ async fn list_backup_groups(param: Value) -> Result { }; let render_files = |_v: &Value, record: &Value| -> Result { - let item: GroupListItem = serde_json::from_value(record.to_owned())?; + let item = GroupListItem::deserialize(record)?; Ok(pbs_tools::format::render_backup_file_list(&item.files)) }; @@ -560,6 +573,10 @@ fn spawn_catalog_upload( optional: true, default: false, }, + "backup-ns": { + schema: BACKUP_NAMESPACE_SCHEMA, + optional: true, + }, "backup-type": { schema: BACKUP_TYPE_SCHEMA, optional: true, @@ -653,6 +670,14 @@ async fn create_backup( .as_str() .unwrap_or(proxmox_sys::nodename()); + let backup_namespace: BackupNamespace = match param.get("backup-ns") { + Some(ns) => ns + .as_str() + .ok_or_else(|| format_err!("bad namespace {:?}", ns))? + .parse()?, + None => BackupNamespace::root(), + }; + let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?; let include_dev = param["include-dev"].as_array(); @@ -775,12 +800,13 @@ async fn create_backup( let client = connect_rate_limited(&repo, rate_limit)?; record_repository(&repo); - println!( - "Starting backup: {}/{}/{}", + let snapshot = BackupDir::from(( + backup_namespace, backup_type, - backup_id, - pbs_datastore::BackupDir::backup_time_to_string(backup_time)? - ); + backup_id.to_owned(), + backup_time, + )); + println!("Starting backup: {snapshot}"); println!("Client name: {}", proxmox_sys::nodename()); @@ -827,9 +853,7 @@ async fn create_backup( client, crypt_config.clone(), repo.store(), - backup_type, - backup_id, - backup_time, + &snapshot, verbose, false, ) @@ -873,7 +897,6 @@ async fn create_backup( None }; - let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time)); let mut manifest = BackupManifest::new(snapshot); let mut catalog = None; @@ -1182,13 +1205,7 @@ async fn restore(param: Value) -> Result { let path = json::required_string_param(¶m, "snapshot")?; - let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 { - let group: BackupGroup = path.parse()?; - api_datastore_latest_snapshot(&client, repo.store(), group).await? 
- } else { - let snapshot: BackupDir = path.parse()?; - (snapshot.group.ty, snapshot.group.id, snapshot.time) - }; + let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?; let target = json::required_string_param(¶m, "target")?; let target = if target == "-" { None } else { Some(target) }; @@ -1211,9 +1228,7 @@ async fn restore(param: Value) -> Result { client, crypt_config.clone(), repo.store(), - backup_type, - &backup_id, - backup_time, + &backup_dir, true, ) .await?; diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs index 508a4f76..cfee5e18 100644 --- a/proxmox-backup-client/src/mount.rs +++ b/proxmox-backup-client/src/mount.rs @@ -18,7 +18,6 @@ use proxmox_schema::*; use proxmox_sys::fd::Fd; use proxmox_sys::sortable; -use pbs_api_types::{BackupDir, BackupGroup}; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_config::key_config::load_and_decrypt_key; @@ -29,8 +28,8 @@ use pbs_tools::crypt_config::CryptConfig; use pbs_tools::json::required_string_param; use crate::{ - api_datastore_latest_snapshot, complete_group_or_snapshot, complete_img_archive_name, - complete_pxar_archive_name, complete_repository, connect, extract_repository_from_value, + complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name, + complete_repository, connect, dir_or_last_from_group, extract_repository_from_value, record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA, }; @@ -199,13 +198,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { record_repository(&repo); let path = required_string_param(¶m, "snapshot")?; - let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 { - let group: BackupGroup = path.parse()?; - api_datastore_latest_snapshot(&client, repo.store(), group).await? 
- } else { - let snapshot: BackupDir = path.parse()?; - (snapshot.group.ty, snapshot.group.id, snapshot.time) - }; + let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?; let keyfile = param["keyfile"].as_str().map(PathBuf::from); let crypt_config = match keyfile { @@ -236,9 +229,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { client, crypt_config.clone(), repo.store(), - backup_type, - &backup_id, - backup_time, + &backup_dir, true, ) .await?; diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs index 6b5e65b9..1733f36b 100644 --- a/proxmox-file-restore/src/main.rs +++ b/proxmox-file-restore/src/main.rs @@ -102,16 +102,8 @@ async fn list_files( driver: Option, ) -> Result, Error> { let client = connect(&repo)?; - let client = BackupReader::start( - client, - crypt_config.clone(), - repo.store(), - snapshot.group.ty, - &snapshot.group.id, - snapshot.time, - true, - ) - .await?; + let client = + BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?; let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; @@ -409,16 +401,8 @@ async fn extract( }; let client = connect(&repo)?; - let client = BackupReader::start( - client, - crypt_config.clone(), - repo.store(), - snapshot.group.ty, - &snapshot.group.id, - snapshot.time, - true, - ) - .await?; + let client = + BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?; let (manifest, _) = client.download_manifest().await?; match path { diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index dcc4e1c1..c9eabd3c 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -10,6 +10,7 @@ use anyhow::{bail, format_err, Error}; use futures::*; use hyper::http::request::Parts; use hyper::{header, Body, Response, StatusCode}; +use serde::Deserialize; use serde_json::{json, Value}; use tokio_stream::wrappers::ReceiverStream; @@ -31,12 +32,13 @@ use pxar::accessor::aio::Accessor; use pxar::EntryKind; use pbs_api_types::{ - Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus, - GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, - SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, + Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem, + DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, + RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, + BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, + IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; use pbs_config::CachedUserInfo; @@ -54,7 +56,7 @@ use pbs_datastore::{ check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, StoreProgress, CATALOG_NAME, }; -use pbs_tools::json::{required_integer_param, required_string_param}; +use 
pbs_tools::json::required_string_param; use proxmox_rest_server::{formatter, WorkerTask}; use crate::api2::node::rrd::create_value_from_rrd; @@ -168,7 +170,7 @@ pub fn list_groups( let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; datastore - .iter_backup_groups()? + .iter_backup_groups(Default::default())? // FIXME: Namespaces and recursion parameters! .try_fold(Vec::new(), |mut group_info, group| { let group = group?; let owner = match datastore.get_owner(group.as_ref()) { @@ -224,8 +226,10 @@ pub fn list_groups( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, + group: { + type: pbs_api_types::BackupGroup, + flatten: true, + }, }, }, access: { @@ -238,14 +242,12 @@ pub fn list_groups( /// Delete backup group including all snapshots. pub fn delete_group( store: String, - backup_type: BackupType, - backup_id: String, + group: pbs_api_types::BackupGroup, _info: &ApiMethod, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let group = pbs_api_types::BackupGroup::from((backup_type, backup_id)); let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?; @@ -261,9 +263,10 @@ pub fn delete_group( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-time": { schema: BACKUP_TIME_SCHEMA }, + backup_dir: { + type: pbs_api_types::BackupDir, + flatten: true, + }, }, }, returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE, @@ -277,16 +280,14 @@ pub fn delete_group( /// List snapshot files. pub fn list_snapshot_files( store: String, - backup_type: BackupType, - backup_id: String, - backup_time: i64, + backup_dir: pbs_api_types::BackupDir, _info: &ApiMethod, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; - let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let snapshot = datastore.backup_dir(backup_dir)?; check_priv_or_backup_owner( &datastore, @@ -306,9 +307,10 @@ pub fn list_snapshot_files( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-time": { schema: BACKUP_TIME_SCHEMA }, + backup_dir: { + type: pbs_api_types::BackupDir, + flatten: true, + }, }, }, access: { @@ -321,16 +323,14 @@ pub fn list_snapshot_files( /// Delete backup snapshot. 
pub fn delete_snapshot( store: String, - backup_type: BackupType, - backup_id: String, - backup_time: i64, + backup_dir: pbs_api_types::BackupDir, _info: &ApiMethod, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; - let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let snapshot = datastore.backup_dir(backup_dir)?; check_priv_or_backup_owner( &datastore, @@ -349,6 +349,10 @@ pub fn delete_snapshot( input: { properties: { store: { schema: DATASTORE_SCHEMA }, + "backup-ns": { + type: BackupNamespace, + optional: true, + }, "backup-type": { optional: true, type: BackupType, @@ -370,6 +374,7 @@ pub fn delete_snapshot( /// List backup snapshots. pub fn list_snapshots( store: String, + backup_ns: Option, backup_type: Option, backup_id: Option, _param: Value, @@ -384,21 +389,26 @@ pub fn list_snapshots( let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; + let backup_ns = backup_ns.unwrap_or_default(); + // FIXME: filter also owner before collecting, for doing that nicely the owner should move into // backup group and provide an error free (Err -> None) accessor let groups = match (backup_type, backup_id) { (Some(backup_type), Some(backup_id)) => { - vec![datastore.backup_group_from_parts(backup_type, backup_id)] + vec![datastore.backup_group_from_parts(backup_ns, backup_type, backup_id)] } + // FIXME: Recursion (Some(backup_type), None) => datastore - .iter_backup_groups_ok()? + .iter_backup_groups_ok(backup_ns)? .filter(|group| group.backup_type() == backup_type) .collect(), + // FIXME: Recursion (None, Some(backup_id)) => datastore - .iter_backup_groups_ok()? + .iter_backup_groups_ok(backup_ns)? .filter(|group| group.backup_id() == backup_id) .collect(), - _ => datastore.list_backup_groups()?, + // FIXME: Recursion + (None, None) => datastore.list_backup_groups(backup_ns)?, }; let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| { @@ -506,7 +516,7 @@ fn get_snapshots_count( filter_owner: Option<&Authid>, ) -> Result { store - .iter_backup_groups_ok()? + .iter_backup_groups_ok(Default::default())? // FIXME: Recurse! .filter(|group| { let owner = match store.get_owner(group.as_ref()) { Ok(owner) => owner, @@ -606,6 +616,10 @@ pub fn status( store: { schema: DATASTORE_SCHEMA, }, + "backup-ns": { + type: BackupNamespace, + optional: true, + }, "backup-type": { type: BackupType, optional: true, @@ -641,6 +655,7 @@ pub fn status( /// or all backups in the datastore. pub fn verify( store: String, + backup_ns: Option, backup_type: Option, backup_id: Option, backup_time: Option, @@ -658,13 +673,22 @@ pub fn verify( let mut backup_group = None; let mut worker_type = "verify"; + // FIXME: Recursion + // FIXME: Namespaces and worker ID, could this be an issue? 
+ let backup_ns = backup_ns.unwrap_or_default(); + match (backup_type, backup_id, backup_time) { (Some(backup_type), Some(backup_id), Some(backup_time)) => { worker_id = format!( - "{}:{}/{}/{:08X}", - store, backup_type, backup_id, backup_time + "{}:{}/{}/{}/{:08X}", + store, + backup_ns.display_as_path(), + backup_type, + backup_id, + backup_time ); - let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let dir = + datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?; check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?; @@ -672,8 +696,14 @@ pub fn verify( worker_type = "verify_snapshot"; } (Some(backup_type), Some(backup_id), None) => { - worker_id = format!("{}:{}/{}", store, backup_type, backup_id); - let group = pbs_api_types::BackupGroup::from((backup_type, backup_id)); + worker_id = format!( + "{}:{}/{}/{}", + store, + backup_ns.display_as_path(), + backup_type, + backup_id + ); + let group = pbs_api_types::BackupGroup::from((backup_ns, backup_type, backup_id)); check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?; @@ -748,8 +778,10 @@ pub fn verify( #[api( input: { properties: { - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-type": { type: BackupType }, + group: { + type: pbs_api_types::BackupGroup, + flatten: true, + }, "dry-run": { optional: true, type: bool, @@ -772,8 +804,7 @@ pub fn verify( )] /// Prune a group on the datastore pub fn prune( - backup_id: String, - backup_type: BackupType, + group: pbs_api_types::BackupGroup, dry_run: bool, prune_options: PruneOptions, store: String, @@ -784,11 +815,11 @@ pub fn prune( let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; - let group = datastore.backup_group_from_parts(backup_type, &backup_id); + let group = datastore.backup_group(group); check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?; - let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id); + let worker_id = format!("{}:{}", store, group); let mut prune_result = Vec::new(); @@ -828,10 +859,9 @@ pub fn prune( ); task_log!( worker, - "Starting prune on store \"{}\" group \"{}/{}\"", + "Starting prune on store \"{}\" group \"{}\"", store, - backup_type, - backup_id + group, ); } @@ -1076,11 +1106,7 @@ pub fn download_file( let file_name = required_string_param(¶m, "file-name")?.to_owned(); - let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; - let backup_id = required_string_param(¶m, "backup-id")?.to_owned(); - let backup_time = required_integer_param(¶m, "backup-time")?; - - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(Deserialize::deserialize(¶m)?)?; check_priv_or_backup_owner( &datastore, @@ -1159,11 +1185,7 @@ pub fn download_file_decoded( let file_name = required_string_param(¶m, "file-name")?.to_owned(); - let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; - let backup_id = required_string_param(¶m, "backup-id")?.to_owned(); - let backup_time = required_integer_param(¶m, "backup-time")?; - - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(Deserialize::deserialize(¶m)?)?; check_priv_or_backup_owner( &datastore, @@ -1285,11 +1307,7 @@ pub fn upload_backup_log( let file_name = CLIENT_LOG_BLOB_NAME; - let backup_type: BackupType = 
required_string_param(¶m, "backup-type")?.parse()?; - let backup_id = required_string_param(¶m, "backup-id")?; - let backup_time = required_integer_param(¶m, "backup-time")?; - - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(Deserialize::deserialize(¶m)?)?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let owner = datastore.get_owner(backup_dir.as_ref())?; @@ -1303,14 +1321,7 @@ pub fn upload_backup_log( bail!("backup already contains a log."); } - println!( - "Upload backup log to {}/{}/{}/{}/{}", - store, - backup_type, - backup_id, - backup_dir.backup_time_string(), - file_name - ); + println!("Upload backup log to {store}/{backup_dir}/{file_name}"); let data = req_body .map_err(Error::from) @@ -1335,9 +1346,10 @@ pub fn upload_backup_log( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-time": { schema: BACKUP_TIME_SCHEMA }, + backup_dir: { + type: pbs_api_types::BackupDir, + flatten: true, + }, "filepath": { description: "Base64 encoded path.", type: String, @@ -1351,9 +1363,7 @@ pub fn upload_backup_log( /// Get the entries of the given path of the catalog pub fn catalog( store: String, - backup_type: BackupType, - backup_id: String, - backup_time: i64, + backup_dir: pbs_api_types::BackupDir, filepath: String, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { @@ -1361,7 +1371,7 @@ pub fn catalog( let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(backup_dir)?; check_priv_or_backup_owner( &datastore, @@ -1438,13 +1448,9 @@ pub fn pxar_file_download( let filepath = required_string_param(¶m, "filepath")?.to_owned(); - let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; - let backup_id = required_string_param(¶m, "backup-id")?; - let backup_time = required_integer_param(¶m, "backup-time")?; - let tar = param["tar"].as_bool().unwrap_or(false); - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(Deserialize::deserialize(¶m)?)?; check_priv_or_backup_owner( &datastore, @@ -1617,8 +1623,10 @@ pub fn get_active_operations(store: String, _param: Value) -> Result Result Result { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id)); check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?; @@ -1647,8 +1653,10 @@ pub fn get_group_notes( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, + backup_group: { + type: pbs_api_types::BackupGroup, + flatten: true, + }, notes: { description: "A multiline text.", }, @@ -1663,15 +1671,13 @@ pub fn get_group_notes( /// Set "notes" for a backup group pub fn set_group_notes( store: String, - backup_type: BackupType, - backup_id: String, + backup_group: pbs_api_types::BackupGroup, notes: String, rpcenv: &mut dyn RpcEnvironment, ) -> Result<(), Error> { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let backup_group = 
pbs_api_types::BackupGroup::from((backup_type, backup_id)); check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?; @@ -1685,9 +1691,10 @@ pub fn set_group_notes( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-time": { schema: BACKUP_TIME_SCHEMA }, + backup_dir: { + type: pbs_api_types::BackupDir, + flatten: true, + }, }, }, access: { @@ -1697,15 +1704,13 @@ pub fn set_group_notes( /// Get "notes" for a specific backup pub fn get_notes( store: String, - backup_type: BackupType, - backup_id: String, - backup_time: i64, + backup_dir: pbs_api_types::BackupDir, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(backup_dir)?; check_priv_or_backup_owner( &datastore, @@ -1725,9 +1730,10 @@ pub fn get_notes( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-time": { schema: BACKUP_TIME_SCHEMA }, + backup_dir: { + type: pbs_api_types::BackupDir, + flatten: true, + }, notes: { description: "A multiline text.", }, @@ -1742,16 +1748,14 @@ pub fn get_notes( /// Set "notes" for a specific backup pub fn set_notes( store: String, - backup_type: BackupType, - backup_id: String, - backup_time: i64, + backup_dir: pbs_api_types::BackupDir, notes: String, rpcenv: &mut dyn RpcEnvironment, ) -> Result<(), Error> { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(backup_dir)?; check_priv_or_backup_owner( &datastore, @@ -1773,9 +1777,10 @@ pub fn set_notes( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-time": { schema: BACKUP_TIME_SCHEMA }, + backup_dir: { + type: pbs_api_types::BackupDir, + flatten: true, + }, }, }, access: { @@ -1785,15 +1790,13 @@ pub fn set_notes( /// Query protection for a specific backup pub fn get_protection( store: String, - backup_type: BackupType, - backup_id: String, - backup_time: i64, + backup_dir: pbs_api_types::BackupDir, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(backup_dir)?; check_priv_or_backup_owner( &datastore, @@ -1809,9 +1812,10 @@ pub fn get_protection( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, - "backup-time": { schema: BACKUP_TIME_SCHEMA }, + backup_dir: { + type: pbs_api_types::BackupDir, + flatten: true, + }, protected: { description: "Enable/disable protection.", }, @@ -1826,16 +1830,14 @@ pub fn get_protection( /// En- or disable protection for a specific backup pub fn set_protection( store: String, - backup_type: BackupType, - backup_id: String, - backup_time: i64, + 
backup_dir: pbs_api_types::BackupDir, protected: bool, rpcenv: &mut dyn RpcEnvironment, ) -> Result<(), Error> { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(backup_dir)?; check_priv_or_backup_owner( &datastore, @@ -1851,8 +1853,10 @@ pub fn set_protection( input: { properties: { store: { schema: DATASTORE_SCHEMA }, - "backup-type": { type: BackupType }, - "backup-id": { schema: BACKUP_ID_SCHEMA }, + backup_group: { + type: pbs_api_types::BackupGroup, + flatten: true, + }, "new-owner": { type: Authid, }, @@ -1866,14 +1870,13 @@ pub fn set_protection( /// Change owner of a backup group pub fn set_backup_owner( store: String, - backup_type: BackupType, - backup_id: String, + backup_group: pbs_api_types::BackupGroup, new_owner: Authid, rpcenv: &mut dyn RpcEnvironment, ) -> Result<(), Error> { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; - let backup_group = datastore.backup_group_from_parts(backup_type, backup_id); + let backup_group = datastore.backup_group(backup_group); let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index 9effc494..5f29edc3 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -6,6 +6,7 @@ use hex::FromHex; use hyper::header::{HeaderValue, UPGRADE}; use hyper::http::request::Parts; use hyper::{Body, Request, Response, StatusCode}; +use serde::Deserialize; use serde_json::{json, Value}; use proxmox_router::list_subdirs_api_method; @@ -81,9 +82,7 @@ fn upgrade_to_backup_protocol( let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; - let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; - let backup_id = required_string_param(¶m, "backup-id")?; - let backup_time = required_integer_param(¶m, "backup-time")?; + let backup_dir_arg = pbs_api_types::BackupDir::deserialize(¶m)?; let protocols = parts .headers @@ -102,13 +101,15 @@ fn upgrade_to_backup_protocol( ); } - let worker_id = format!("{}:{}/{}", store, backup_type, backup_id); + let worker_id = format!("{}:{}/{}", store, backup_dir_arg.ty(), backup_dir_arg.id()); let env_type = rpcenv.env_type(); - let backup_group = datastore.backup_group_from_parts(backup_type, backup_id); + let backup_group = datastore.backup_group(backup_dir_arg.group.clone()); - let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" { + let worker_type = if backup_group.backup_type() == BackupType::Host + && backup_group.backup_id() == "benchmark" + { if !benchmark { bail!("unable to run benchmark without --benchmark flags"); } @@ -152,7 +153,7 @@ fn upgrade_to_backup_protocol( } }; - let backup_dir = backup_group.backup_dir(backup_time)?; + let backup_dir = backup_group.backup_dir(backup_dir_arg.time)?; let _last_guard = if let Some(last) = &last_backup { if backup_dir.backup_time() <= last.backup_dir.backup_time() { diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs index 6bde4ccb..25c02bfe 100644 --- a/src/api2/reader/mod.rs +++ b/src/api2/reader/mod.rs @@ -6,6 +6,7 @@ use hex::FromHex; use hyper::header::{self, HeaderValue, UPGRADE}; use hyper::http::request::Parts; use hyper::{Body, Request, Response, StatusCode}; +use serde::Deserialize; use serde_json::Value; use proxmox_router::{ @@ -16,15 +17,15 @@ use 
proxmox_schema::{BooleanSchema, ObjectSchema}; use proxmox_sys::sortable; use pbs_api_types::{ - Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, + Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, + BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_READ, }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; use pbs_datastore::manifest::{archive_type, ArchiveType}; use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1}; -use pbs_tools::json::{required_integer_param, required_string_param}; +use pbs_tools::json::required_string_param; use proxmox_rest_server::{H2Service, WorkerTask}; use proxmox_sys::fs::lock_dir_noblock_shared; @@ -89,9 +90,7 @@ fn upgrade_to_backup_reader_protocol( let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; - let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; - let backup_id = required_string_param(¶m, "backup-id")?; - let backup_time = required_integer_param(¶m, "backup-time")?; + let backup_dir = pbs_api_types::BackupDir::deserialize(¶m)?; let protocols = parts .headers @@ -112,7 +111,7 @@ fn upgrade_to_backup_reader_protocol( let env_type = rpcenv.env_type(); - let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; + let backup_dir = datastore.backup_dir(backup_dir)?; if !priv_read { let owner = datastore.get_owner(backup_dir.as_ref())?; let correct_owner = owner == auth_id @@ -135,9 +134,9 @@ fn upgrade_to_backup_reader_protocol( let worker_id = format!( "{}:{}/{}/{:08X}", store, - backup_type, - backup_id, - backup_dir.backup_time() + backup_dir.backup_type(), + backup_dir.backup_id(), + backup_dir.backup_time(), ); WorkerTask::spawn( diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs index e83ac1bc..59e0994b 100644 --- a/src/api2/tape/backup.rs +++ b/src/api2/tape/backup.rs @@ -408,7 +408,8 @@ fn backup_worker( let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email, force_media_set)?; - let mut group_list = datastore.list_backup_groups()?; + // FIXME: Namespaces! Probably just recurse for now? Not sure about the usage here... + let mut group_list = datastore.list_backup_groups(Default::default())?; group_list.sort_unstable_by(|a, b| a.group().cmp(b.group())); diff --git a/src/backup/verify.rs b/src/backup/verify.rs index 7d5d3539..c00aefbc 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -533,7 +533,11 @@ pub fn verify_all_backups( } }; - let mut list = match verify_worker.datastore.iter_backup_groups_ok() { + // FIXME: This should probably simply enable recursion (or the call have a recursion parameter) + let mut list = match verify_worker + .datastore + .iter_backup_groups_ok(Default::default()) + { Ok(list) => list .filter(|group| { !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark") diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs index 2f8461eb..0208fbc2 100644 --- a/src/server/prune_job.rs +++ b/src/server/prune_job.rs @@ -42,7 +42,8 @@ pub fn prune_datastore( let privs = user_info.lookup_privs(&auth_id, &["datastore", store]); let has_privs = privs & PRIV_DATASTORE_MODIFY != 0; - for group in datastore.iter_backup_groups()? { + // FIXME: Namespaces and recursion! 
+ for group in datastore.iter_backup_groups(Default::default())? { let group = group?; let list = group.list_backups()?; diff --git a/src/server/pull.rs b/src/server/pull.rs index 48eb5fde..8128873d 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -651,13 +651,11 @@ async fn pull_group( continue; } - let backup_time = snapshot.time; - - remote_snapshots.insert(backup_time); + remote_snapshots.insert(snapshot.time); if let Some(last_sync_time) = last_sync { - if last_sync_time > backup_time { - skip_info.update(backup_time); + if last_sync_time > snapshot.time { + skip_info.update(snapshot.time); continue; } } @@ -676,16 +674,8 @@ async fn pull_group( options, )?; - let reader = BackupReader::start( - new_client, - None, - params.source.store(), - snapshot.group.ty, - &snapshot.group.id, - backup_time, - true, - ) - .await?; + let reader = + BackupReader::start(new_client, None, params.source.store(), &snapshot, true).await?; let result = pull_snapshot_from( worker, @@ -757,6 +747,8 @@ pub async fn pull_store( // explicit create shared lock to prevent GC on newly created chunks let _shared_store_lock = params.store.try_shared_chunk_store_lock()?; + // FIXME: Namespaces! AND: If we make this API call recurse down namespaces we need to do the + // same down in the `remove_vanished` case! let path = format!("api2/json/admin/datastore/{}/groups", params.source.store()); let mut result = client @@ -850,7 +842,8 @@ pub async fn pull_store( if params.remove_vanished { let result: Result<(), Error> = proxmox_lang::try_block!({ - for local_group in params.store.iter_backup_groups()? { + // FIXME: See above comment about namespaces & recursion + for local_group in params.store.iter_backup_groups(Default::default())? { let local_group = local_group?; if new_groups.contains(local_group.as_ref()) { continue;
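
As a usage illustration (not part of the applied diff): a minimal sketch of the extended pbs-api-types surface this series introduces, assuming a scratch binary crate with `anyhow` and the patched `pbs-api-types` as dependencies. The paths and IDs below are made up for the example.

use anyhow::Error;

use pbs_api_types::{BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType};

fn main() -> Result<(), Error> {
    // Groups now carry a namespace; the tuple conversion takes it as the
    // first element, with BackupNamespace::root() giving the old layout.
    let group: BackupGroup =
        (BackupNamespace::root(), BackupType::Vm, "100".to_string()).into();
    assert_eq!(group.to_string(), "vm/100"); // root namespace displays without "ns/" prefix

    // Snapshot constructors take the namespace up front as well.
    let dir = BackupDir::with_rfc3339(
        BackupNamespace::root(),
        BackupType::Vm,
        "100",
        "2022-04-21T15:04:59Z",
    )?;
    assert_eq!(dir.to_string(), "vm/100/2022-04-21T15:04:59Z");

    // The new helper type: one parse for a path that may name either a
    // whole group or a single snapshot directory.
    match "vm/100/2022-04-21T15:04:59Z".parse::<BackupPart>()? {
        BackupPart::Group(group) => println!("whole group: {group}"),
        BackupPart::Dir(dir) => println!("single snapshot: {dir}"),
    }

    Ok(())
}

With BackupPart, the `path.matches('/').count() == 1` heuristic removed from
catalog.rs, main.rs and mount.rs becomes a single `parse()`, which is what the
new client-side `dir_or_last_from_group` helper builds on.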